diff --git a/BUILDING.txt b/BUILDING.txt new file mode 100644 index 000000000000..d3c9a1a7f51e --- /dev/null +++ b/BUILDING.txt @@ -0,0 +1,511 @@ +Build instructions for Hadoop + +---------------------------------------------------------------------------------- +Requirements: + +* Unix System +* JDK 1.8 +* Maven 3.3 or later +* Protocol Buffers 3.7.1 (if compiling native code) +* CMake 3.1 or newer (if compiling native code) +* Zlib devel (if compiling native code) +* Cyrus SASL devel (if compiling native code) +* One of the compilers that support thread_local storage: GCC 4.8.1 or later, Visual Studio, + Clang (community version), Clang (version for iOS 9 and later) (if compiling native code) +* openssl devel (if compiling native hadoop-pipes and to get the best HDFS encryption performance) +* Linux FUSE (Filesystem in Userspace) version 2.6 or above (if compiling fuse_dfs) +* Doxygen ( if compiling libhdfspp and generating the documents ) +* Internet connection for first build (to fetch all Maven and Hadoop dependencies) +* python (for releasedocs) +* bats (for shell code testing) +* Node.js / bower / Ember-cli (for YARN UI v2 building) + +---------------------------------------------------------------------------------- +The easiest way to get an environment with all the appropriate tools is by means +of the provided Docker config. +This requires a recent version of docker (1.4.1 and higher are known to work). + +On Linux / Mac: + Install Docker and run this command: + + $ ./start-build-env.sh + +The prompt which is then presented is located at a mounted version of the source tree +and all required tools for testing and building have been installed and configured. + +Note that from within this docker environment you ONLY have access to the Hadoop source +tree from where you started. So if you need to run + dev-support/bin/test-patch /path/to/my.patch +then the patch must be placed inside the hadoop source tree. + +Known issues: +- On Mac with Boot2Docker the performance on the mounted directory is currently extremely slow. + This is a known problem related to boot2docker on the Mac. + See: + https://github.com/boot2docker/boot2docker/issues/593 + This issue has been resolved as a duplicate, and they point to a new feature for utilizing NFS mounts + as the proposed solution: + https://github.com/boot2docker/boot2docker/issues/64 + An alternative solution to this problem is to install Linux native inside a virtual machine + and run your IDE and Docker etc inside that VM. 
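+
+For example (paths are illustrative), to run test-patch from inside the Docker
+environment described above, first copy the patch into the source tree and then
+reference it relative to that tree:
+
+  $ cp /path/to/my.patch /path/to/hadoop-source/my.patch
+  $ cd /path/to/hadoop-source
+  $ ./start-build-env.sh
+  # then, at the prompt inside the container:
+  $ dev-support/bin/test-patch my.patch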
+
+----------------------------------------------------------------------------------
+Installing required packages for clean install of Ubuntu 14.04 LTS Desktop:
+
+* Oracle JDK 1.8 (preferred)
+  $ sudo apt-get purge openjdk*
+  $ sudo apt-get install software-properties-common
+  $ sudo add-apt-repository ppa:webupd8team/java
+  $ sudo apt-get update
+  $ sudo apt-get install oracle-java8-installer
+* Maven
+  $ sudo apt-get -y install maven
+* Native libraries
+  $ sudo apt-get -y install build-essential autoconf automake libtool cmake zlib1g-dev pkg-config libssl-dev libsasl2-dev
+* Protocol Buffers 3.7.1 (required to build native code)
+  $ mkdir -p /opt/protobuf-3.7-src \
+      && curl -L -s -S \
+        https://github.com/protocolbuffers/protobuf/releases/download/v3.7.1/protobuf-java-3.7.1.tar.gz \
+        -o /opt/protobuf-3.7.1.tar.gz \
+      && tar xzf /opt/protobuf-3.7.1.tar.gz --strip-components 1 -C /opt/protobuf-3.7-src \
+      && cd /opt/protobuf-3.7-src \
+      && ./configure \
+      && make install \
+      && rm -rf /opt/protobuf-3.7-src
+
+Optional packages:
+
+* Snappy compression
+  $ sudo apt-get install snappy libsnappy-dev
+* Intel ISA-L library for erasure coding
+  Please refer to https://01.org/intel%C2%AE-storage-acceleration-library-open-source-version
+  (OR https://github.com/01org/isa-l)
+* Bzip2
+  $ sudo apt-get install bzip2 libbz2-dev
+* Linux FUSE
+  $ sudo apt-get install fuse libfuse-dev
+* ZStandard compression
+  $ sudo apt-get install zstd
+* PMDK library for storage class memory (SCM) as HDFS cache backend
+  Please refer to http://pmem.io/ and https://github.com/pmem/pmdk
+
+----------------------------------------------------------------------------------
+Maven main modules:
+
+  hadoop                                (Main Hadoop project)
+         - hadoop-project               (Parent POM for all Hadoop Maven modules.)
+                                        (All plugins & dependencies versions are defined here.)
+         - hadoop-project-dist          (Parent POM for modules that generate distributions.)
+         - hadoop-annotations           (Generates the Hadoop doclet used to generate the Javadocs)
+         - hadoop-assemblies            (Maven assemblies used by the different modules)
+         - hadoop-maven-plugins         (Maven plugins used in project)
+         - hadoop-build-tools           (Build tools like checkstyle, etc.)
+         - hadoop-common-project        (Hadoop Common)
+         - hadoop-hdfs-project          (Hadoop HDFS)
+         - hadoop-yarn-project          (Hadoop YARN)
+         - hadoop-mapreduce-project     (Hadoop MapReduce)
+         - hadoop-ozone                 (Hadoop Ozone)
+         - hadoop-hdds                  (Hadoop Distributed Data Store)
+         - hadoop-tools                 (Hadoop tools like Streaming, Distcp, etc.)
+         - hadoop-dist                  (Hadoop distribution assembler)
+         - hadoop-client-modules        (Hadoop client modules)
+         - hadoop-minicluster           (Hadoop minicluster artifacts)
+         - hadoop-cloud-storage-project (Generates artifacts to access cloud storage like aws, azure, etc.)
+
+----------------------------------------------------------------------------------
+Where to run Maven from?
+
+  It can be run from any module. The only catch is that if not run from the top
+  level (trunk), all modules that are not part of the build must already be
+  installed in the local Maven cache or available in a Maven repository.
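+
+  As a sketch of a typical workflow (the module and test class below are only
+  examples), populate the local Maven cache once from the top level and then
+  iterate inside a single module:
+
+    $ mvn install -DskipTests          # run once from the top-level directory
+    $ cd hadoop-common-project/hadoop-common
+    $ mvn test -nsu -Dtest=TestConfiguration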
+ +---------------------------------------------------------------------------------- +Maven build goals: + + * Clean : mvn clean [-Preleasedocs] + * Compile : mvn compile [-Pnative] + * Run tests : mvn test [-Pnative] [-Pshelltest] + * Create JAR : mvn package + * Run findbugs : mvn compile findbugs:findbugs + * Run checkstyle : mvn compile checkstyle:checkstyle + * Install JAR in M2 cache : mvn install + * Deploy JAR to Maven repo : mvn deploy + * Run clover : mvn test -Pclover [-DcloverLicenseLocation=${user.name}/.clover.license] + * Run Rat : mvn apache-rat:check + * Build javadocs : mvn javadoc:javadoc + * Build distribution : mvn package [-Pdist][-Pdocs][-Psrc][-Pnative][-Dtar][-Preleasedocs][-Pyarn-ui] + * Change Hadoop version : mvn versions:set -DnewVersion=NEWVERSION + + Build options: + + * Use -Pnative to compile/bundle native code + * Use -Pdocs to generate & bundle the documentation in the distribution (using -Pdist) + * Use -Psrc to create a project source TAR.GZ + * Use -Dtar to create a TAR with the distribution (using -Pdist) + * Use -Preleasedocs to include the changelog and release docs (requires Internet connectivity) + * Use -Pyarn-ui to build YARN UI v2. (Requires Internet connectivity) + * Use -DskipShade to disable client jar shading to speed up build times (in + development environments only, not to build release artifacts) + + YARN Application Timeline Service V2 build options: + + YARN Timeline Service v.2 chooses Apache HBase as the primary backing storage. The supported + versions of Apache HBase are 1.2.6 (default) and 2.0.0-beta1. + + * HBase 1.2.6 is used by default to build Hadoop. The official releases are ready to use if you + plan on running Timeline Service v2 with HBase 1.2.6. + + * Use -Dhbase.profile=2.0 to build Hadoop with HBase 2.0.0-beta1. Provide this option if you plan + on running Timeline Service v2 with HBase 2.0. + + + Snappy build options: + + Snappy is a compression library that can be utilized by the native code. + It is currently an optional component, meaning that Hadoop can be built with + or without this dependency. + + * Use -Drequire.snappy to fail the build if libsnappy.so is not found. + If this option is not specified and the snappy library is missing, + we silently build a version of libhadoop.so that cannot make use of snappy. + This option is recommended if you plan on making use of snappy and want + to get more repeatable builds. + + * Use -Dsnappy.prefix to specify a nonstandard location for the libsnappy + header files and library files. You do not need this option if you have + installed snappy using a package manager. + * Use -Dsnappy.lib to specify a nonstandard location for the libsnappy library + files. Similarly to snappy.prefix, you do not need this option if you have + installed snappy using a package manager. + * Use -Dbundle.snappy to copy the contents of the snappy.lib directory into + the final tar file. This option requires that -Dsnappy.lib is also given, + and it ignores the -Dsnappy.prefix option. If -Dsnappy.lib isn't given, the + bundling and building will fail. + + + ZStandard build options: + + ZStandard is a compression library that can be utilized by the native code. + It is currently an optional component, meaning that Hadoop can be built with + or without this dependency. + + * Use -Drequire.zstd to fail the build if libzstd.so is not found. + If this option is not specified and the zstd library is missing. 
+ + * Use -Dzstd.prefix to specify a nonstandard location for the libzstd + header files and library files. You do not need this option if you have + installed zstandard using a package manager. + + * Use -Dzstd.lib to specify a nonstandard location for the libzstd library + files. Similarly to zstd.prefix, you do not need this option if you have + installed using a package manager. + + * Use -Dbundle.zstd to copy the contents of the zstd.lib directory into + the final tar file. This option requires that -Dzstd.lib is also given, + and it ignores the -Dzstd.prefix option. If -Dzstd.lib isn't given, the + bundling and building will fail. + + OpenSSL build options: + + OpenSSL includes a crypto library that can be utilized by the native code. + It is currently an optional component, meaning that Hadoop can be built with + or without this dependency. + + * Use -Drequire.openssl to fail the build if libcrypto.so is not found. + If this option is not specified and the openssl library is missing, + we silently build a version of libhadoop.so that cannot make use of + openssl. This option is recommended if you plan on making use of openssl + and want to get more repeatable builds. + * Use -Dopenssl.prefix to specify a nonstandard location for the libcrypto + header files and library files. You do not need this option if you have + installed openssl using a package manager. + * Use -Dopenssl.lib to specify a nonstandard location for the libcrypto library + files. Similarly to openssl.prefix, you do not need this option if you have + installed openssl using a package manager. + * Use -Dbundle.openssl to copy the contents of the openssl.lib directory into + the final tar file. This option requires that -Dopenssl.lib is also given, + and it ignores the -Dopenssl.prefix option. If -Dopenssl.lib isn't given, the + bundling and building will fail. + + Tests options: + + * Use -DskipTests to skip tests when running the following Maven goals: + 'package', 'install', 'deploy' or 'verify' + * -Dtest=,,.... + * -Dtest.exclude= + * -Dtest.exclude.pattern=**/.java,**/.java + * To run all native unit tests, use: mvn test -Pnative -Dtest=allNative + * To run a specific native unit test, use: mvn test -Pnative -Dtest= + For example, to run test_bulk_crc32, you would use: + mvn test -Pnative -Dtest=test_bulk_crc32 + + Intel ISA-L build options: + + Intel ISA-L is an erasure coding library that can be utilized by the native code. + It is currently an optional component, meaning that Hadoop can be built with + or without this dependency. Note the library is used via dynamic module. Please + reference the official site for the library details. + https://01.org/intel%C2%AE-storage-acceleration-library-open-source-version + (OR https://github.com/01org/isa-l) + + * Use -Drequire.isal to fail the build if libisal.so is not found. + If this option is not specified and the isal library is missing, + we silently build a version of libhadoop.so that cannot make use of ISA-L and + the native raw erasure coders. + This option is recommended if you plan on making use of native raw erasure + coders and want to get more repeatable builds. + * Use -Disal.prefix to specify a nonstandard location for the libisal + library files. You do not need this option if you have installed ISA-L to the + system library path. + * Use -Disal.lib to specify a nonstandard location for the libisal library + files. + * Use -Dbundle.isal to copy the contents of the isal.lib directory into + the final tar file. 
This option requires that -Disal.lib is also given, + and it ignores the -Disal.prefix option. If -Disal.lib isn't given, the + bundling and building will fail. + + Special plugins: OWASP's dependency-check: + + OWASP's dependency-check plugin will scan the third party dependencies + of this project for known CVEs (security vulnerabilities against them). + It will produce a report in target/dependency-check-report.html. To + invoke, run 'mvn dependency-check:aggregate'. Note that this plugin + requires maven 3.1.1 or greater. + + PMDK library build options: + + The Persistent Memory Development Kit (PMDK), formerly known as NVML, is a growing + collection of libraries which have been developed for various use cases, tuned, + validated to production quality, and thoroughly documented. These libraries are built + on the Direct Access (DAX) feature available in both Linux and Windows, which allows + applications directly load/store access to persistent memory by memory-mapping files + on a persistent memory aware file system. + + It is currently an optional component, meaning that Hadoop can be built without + this dependency. Please Note the library is used via dynamic module. For getting + more details please refer to the official sites: + http://pmem.io/ and https://github.com/pmem/pmdk. + + * -Drequire.pmdk is used to build the project with PMDK libraries forcibly. With this + option provided, the build will fail if libpmem library is not found. If this option + is not given, the build will generate a version of Hadoop with libhadoop.so. + And storage class memory(SCM) backed HDFS cache is still supported without PMDK involved. + Because PMDK can bring better caching write/read performance, it is recommended to build + the project with this option if user plans to use SCM backed HDFS cache. + * -Dpmdk.lib is used to specify a nonstandard location for PMDK libraries if they are not + under /usr/lib or /usr/lib64. + * -Dbundle.pmdk is used to copy the specified libpmem libraries into the distribution tar + package. This option requires that -Dpmdk.lib is specified. With -Dbundle.pmdk provided, + the build will fail if -Dpmdk.lib is not specified. + +---------------------------------------------------------------------------------- +Building components separately + +If you are building a submodule directory, all the hadoop dependencies this +submodule has will be resolved as all other 3rd party dependencies. This is, +from the Maven cache or from a Maven repository (if not available in the cache +or the SNAPSHOT 'timed out'). +An alternative is to run 'mvn install -DskipTests' from Hadoop source top +level once; and then work from the submodule. Keep in mind that SNAPSHOTs +time out after a while, using the Maven '-nsu' will stop Maven from trying +to update SNAPSHOTs from external repos. + +---------------------------------------------------------------------------------- +Importing projects to eclipse + +When you import the project to eclipse, install hadoop-maven-plugins at first. + + $ cd hadoop-maven-plugins + $ mvn install + +Then, generate eclipse project files. + + $ mvn eclipse:eclipse -DskipTests + +At last, import to eclipse by specifying the root directory of the project via +[File] > [Import] > [Existing Projects into Workspace]. 
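+
+For reference, here is an illustrative command that combines several of the
+native build options described earlier; the -Drequire.* flags simply make the
+build fail fast if the corresponding libraries are missing, so adjust them to
+the libraries you actually need:
+
+  $ mvn clean package -Pdist,native -DskipTests -Dtar \
+      -Drequire.snappy -Drequire.zstd -Drequire.openssl -Drequire.isal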
+ +---------------------------------------------------------------------------------- +Building distributions: + +Create binary distribution without native code and without documentation: + + $ mvn package -Pdist -DskipTests -Dtar -Dmaven.javadoc.skip=true + +Create binary distribution with native code and with documentation: + + $ mvn package -Pdist,native,docs -DskipTests -Dtar + +Create source distribution: + + $ mvn package -Psrc -DskipTests + +Create source and binary distributions with native code and documentation: + + $ mvn package -Pdist,native,docs,src -DskipTests -Dtar + +Create a local staging version of the website (in /tmp/hadoop-site) + + $ mvn clean site -Preleasedocs; mvn site:stage -DstagingDirectory=/tmp/hadoop-site + +Note that the site needs to be built in a second pass after other artifacts. + +---------------------------------------------------------------------------------- +Installing Hadoop + +Look for these HTML files after you build the document by the above commands. + + * Single Node Setup: + hadoop-project-dist/hadoop-common/SingleCluster.html + + * Cluster Setup: + hadoop-project-dist/hadoop-common/ClusterSetup.html + +---------------------------------------------------------------------------------- + +Handling out of memory errors in builds + +---------------------------------------------------------------------------------- + +If the build process fails with an out of memory error, you should be able to fix +it by increasing the memory used by maven which can be done via the environment +variable MAVEN_OPTS. + +Here is an example setting to allocate between 256 MB and 1.5 GB of heap space to +Maven + +export MAVEN_OPTS="-Xms256m -Xmx1536m" + +---------------------------------------------------------------------------------- + +Building on macOS (without Docker) + +---------------------------------------------------------------------------------- +Installing required dependencies for clean install of macOS 10.14: + +* Install Xcode Command Line Tools + $ xcode-select --install +* Install Homebrew + $ /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" +* Install OpenJDK 8 + $ brew tap AdoptOpenJDK/openjdk + $ brew cask install adoptopenjdk8 +* Install maven and tools + $ brew install maven autoconf automake cmake wget +* Install native libraries, only openssl is required to compile native code, +you may optionally install zlib, lz4, etc. + $ brew install openssl +* Protocol Buffers 3.7.1 (required to compile native code) + $ wget https://github.com/protocolbuffers/protobuf/releases/download/v3.7.1/protobuf-java-3.7.1.tar.gz + $ mkdir -p protobuf-3.7 && tar zxvf protobuf-java-3.7.1.tar.gz --strip-components 1 -C protobuf-3.7 + $ cd protobuf-3.7 + $ ./configure + $ make + $ make check + $ make install + $ protoc --version + +Note that building Hadoop 3.1.1/3.1.2/3.2.0 native code from source is broken +on macOS. For 3.1.1/3.1.2, you need to manually backport YARN-8622. For 3.2.0, +you need to backport both YARN-8622 and YARN-9487 in order to build native code. + +---------------------------------------------------------------------------------- +Building command example: + +* Create binary distribution with native code but without documentation: + $ mvn package -Pdist,native -DskipTests -Dmaven.javadoc.skip \ + -Dopenssl.prefix=/usr/local/opt/openssl + +Note that the command above manually specified the openssl library and include +path. This is necessary at least for Homebrewed OpenSSL. 
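+
+After a native build, one way to confirm which native libraries were actually
+detected is the checknative command, run from the unpacked distribution (the
+path below assumes a -Pdist build and is illustrative):
+
+  $ hadoop-dist/target/hadoop-<version>/bin/hadoop checknative -a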
+ +---------------------------------------------------------------------------------- + +Building on Windows + +---------------------------------------------------------------------------------- +Requirements: + +* Windows System +* JDK 1.8 +* Maven 3.0 or later +* Protocol Buffers 3.7.1 +* CMake 3.1 or newer +* Visual Studio 2010 Professional or Higher +* Windows SDK 8.1 (if building CPU rate control for the container executor) +* zlib headers (if building native code bindings for zlib) +* Internet connection for first build (to fetch all Maven and Hadoop dependencies) +* Unix command-line tools from GnuWin32: sh, mkdir, rm, cp, tar, gzip. These + tools must be present on your PATH. +* Python ( for generation of docs using 'mvn site') + +Unix command-line tools are also included with the Windows Git package which +can be downloaded from http://git-scm.com/downloads + +If using Visual Studio, it must be Professional level or higher. +Do not use Visual Studio Express. It does not support compiling for 64-bit, +which is problematic if running a 64-bit system. + +The Windows SDK 8.1 is available to download at: + +http://msdn.microsoft.com/en-us/windows/bg162891.aspx + +Cygwin is not required. + +---------------------------------------------------------------------------------- +Building: + +Keep the source code tree in a short path to avoid running into problems related +to Windows maximum path length limitation (for example, C:\hdc). + +There is one support command file located in dev-support called win-paths-eg.cmd. +It should be copied somewhere convenient and modified to fit your needs. + +win-paths-eg.cmd sets up the environment for use. You will need to modify this +file. It will put all of the required components in the command path, +configure the bit-ness of the build, and set several optional components. + +Several tests require that the user must have the Create Symbolic Links +privilege. + +All Maven goals are the same as described above with the exception that +native code is built by enabling the 'native-win' Maven profile. -Pnative-win +is enabled by default when building on Windows since the native components +are required (not optional) on Windows. + +If native code bindings for zlib are required, then the zlib headers must be +deployed on the build machine. Set the ZLIB_HOME environment variable to the +directory containing the headers. + +set ZLIB_HOME=C:\zlib-1.2.7 + +At runtime, zlib1.dll must be accessible on the PATH. Hadoop has been tested +with zlib 1.2.7, built using Visual Studio 2010 out of contrib\vstudio\vc10 in +the zlib 1.2.7 source tree. + +http://www.zlib.net/ + +---------------------------------------------------------------------------------- +Building distributions: + + * Build distribution with native code : mvn package [-Pdist][-Pdocs][-Psrc][-Dtar][-Dmaven.javadoc.skip=true] + +---------------------------------------------------------------------------------- +Running compatibility checks with checkcompatibility.py + +Invoke `./dev-support/bin/checkcompatibility.py` to run Java API Compliance Checker +to compare the public Java APIs of two git objects. This can be used by release +managers to compare the compatibility of a previous and current release. 
+ +As an example, this invocation will check the compatibility of interfaces annotated as Public or LimitedPrivate: + +./dev-support/bin/checkcompatibility.py --annotation org.apache.hadoop.classification.InterfaceAudience.Public --annotation org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate --include "hadoop.*" branch-2.7.2 trunk + +---------------------------------------------------------------------------------- +Changing the Hadoop version declared returned by VersionInfo + +If for compatibility reasons the version of Hadoop has to be declared as a 2.x release in the information returned by +org.apache.hadoop.util.VersionInfo, set the property declared.hadoop.version to the desired version. +For example: mvn package -Pdist -Ddeclared.hadoop.version=2.11 + +If unset, the project version declared in the POM file is used. diff --git a/LICENSE.txt b/LICENSE.txt new file mode 100644 index 000000000000..d0d57461e76d --- /dev/null +++ b/LICENSE.txt @@ -0,0 +1,258 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +This product bundles various third-party components under other open source +licenses. This section summarizes those components and their licenses. +See licenses/ for text of these licenses. + + +Apache Software Foundation License 2.0 +-------------------------------------- + +hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/nvd3-1.8.5.* (css and js files) +hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AbstractFuture.java +hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/TimeoutFuture.java + + +BSD 2-Clause +------------ + +hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/{lz4.h,lz4.c,lz4hc.h,lz4hc.c} +hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/util/tree.h +hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/compat/{fstatat|openat|unlinkat}.h + + +BSD 3-Clause +------------ + +hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/bloom/* +hadoop-common-project/hadoop-common/src/main/native/gtest/gtest-all.cc +hadoop-common-project/hadoop-common/src/main/native/gtest/include/gtest/gtest.h +hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/bulk_crc32_x86.c +hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/d3.v3.js +hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/d3-3.5.17.min.js + + +MIT License +----------- + +hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/angular-1.6.4.min.js +hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/angular-nvd3-1.0.9.min.js +hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/angular-route-1.6.4.min.js +hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.4.1 +hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dataTables.bootstrap.css +hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dataTables.bootstrap.js +hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dust-full-2.0.0.min.js +hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dust-helpers-1.1.1.min.js +hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/jquery-3.4.1.min.js 
+hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/jquery.dataTables.min.js +hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/moment.min.js +hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/bootstrap.min.js +hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/jquery.js +hadoop-tools/hadoop-sls/src/main/html/css/bootstrap.min.css +hadoop-tools/hadoop-sls/src/main/html/css/bootstrap-responsive.min.css +hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/* +hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery +hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jt/jquery.jstree.js +hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/TERMINAL diff --git a/NOTICE.txt b/NOTICE.txt new file mode 100644 index 000000000000..f6715f7beb08 --- /dev/null +++ b/NOTICE.txt @@ -0,0 +1,34 @@ +Apache Hadoop +Copyright 2006 and onwards The Apache Software Foundation. + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +Export Control Notice +--------------------- + +This distribution includes cryptographic software. The country in +which you currently reside may have restrictions on the import, +possession, use, and/or re-export to another country, of +encryption software. BEFORE using any encryption software, please +check your country's laws, regulations and policies concerning the +import, possession, or use, and re-export of encryption software, to +see if this is permitted. See for more +information. + +The U.S. Government Department of Commerce, Bureau of Industry and +Security (BIS), has classified this software as Export Commodity +Control Number (ECCN) 5D002.C.1, which includes information security +software using or performing cryptographic functions with asymmetric +algorithms. The form and manner of this Apache Software Foundation +distribution makes it eligible for export under the License Exception +ENC Technology Software Unrestricted (TSU) exception (see the BIS +Export Administration Regulations, Section 740.13) for both object +code and source code. + +The following provides more details on the included cryptographic software: + +This software uses the SSL libraries from the Jetty project written +by mortbay.org. +Hadoop Yarn Server Web Proxy uses the BouncyCastle Java +cryptography APIs written by the Legion of the Bouncy Castle Inc. diff --git a/README.txt b/README.txt new file mode 100644 index 000000000000..8d37cc92a7ec --- /dev/null +++ b/README.txt @@ -0,0 +1,7 @@ +For the latest information about Hadoop, please visit our website at: + + http://hadoop.apache.org/ + +and our wiki, at: + + https://cwiki.apache.org/confluence/display/HADOOP/ diff --git a/dev-support/bin/qbt b/dev-support/bin/qbt new file mode 100755 index 000000000000..fe5e6f689278 --- /dev/null +++ b/dev-support/bin/qbt @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +BINDIR=$(cd -P -- "$(dirname -- "${BASH_SOURCE-0}")" >/dev/null && pwd -P) +exec "${BINDIR}/yetus-wrapper" qbt --project=hadoop --skip-dir=dev-support "$@" diff --git a/dev-support/bin/smart-apply-patch b/dev-support/bin/smart-apply-patch new file mode 100755 index 000000000000..3fd469f3879e --- /dev/null +++ b/dev-support/bin/smart-apply-patch @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +BINDIR=$(cd -P -- "$(dirname -- "${BASH_SOURCE-0}")" >/dev/null && pwd -P) +exec "${BINDIR}/yetus-wrapper" smart-apply-patch --project=hadoop "$@" diff --git a/dev-support/bin/test-patch b/dev-support/bin/test-patch new file mode 100755 index 000000000000..8ff8119b3e08 --- /dev/null +++ b/dev-support/bin/test-patch @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +BINDIR=$(cd -P -- "$(dirname -- "${BASH_SOURCE-0}")" >/dev/null && pwd -P) +exec "${BINDIR}/yetus-wrapper" test-patch --project=hadoop --skip-dir=dev-support "$@" diff --git a/dev-support/bin/yetus-wrapper b/dev-support/bin/yetus-wrapper new file mode 100755 index 000000000000..b0f71f105d85 --- /dev/null +++ b/dev-support/bin/yetus-wrapper @@ -0,0 +1,188 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# you must be this high to ride the ride +if [[ -z "${BASH_VERSINFO[0]}" ]] \ + || [[ "${BASH_VERSINFO[0]}" -lt 3 ]] \ + || [[ "${BASH_VERSINFO[0]}" -eq 3 && "${BASH_VERSINFO[1]}" -lt 2 ]]; then + echo "bash v3.2+ is required. Sorry." + exit 1 +fi + +set -o pipefail + +## @description Print a message to stderr +## @audience public +## @stability stable +## @replaceable no +## @param string +function yetus_error +{ + echo "$*" 1>&2 +} + +## @description Given a filename or dir, return the absolute version of it +## @audience public +## @stability stable +## @param directory +## @replaceable no +## @return 0 success +## @return 1 failure +## @return stdout abspath +function yetus_abs +{ + declare obj=$1 + declare dir + declare fn + declare dirret + + if [[ ! -e ${obj} ]]; then + return 1 + elif [[ -d ${obj} ]]; then + dir=${obj} + else + dir=$(dirname -- "${obj}") + fn=$(basename -- "${obj}") + fn="/${fn}" + fi + + dir=$(cd -P -- "${dir}" >/dev/null 2>/dev/null && pwd -P) + dirret=$? + if [[ ${dirret} = 0 ]]; then + echo "${dir}${fn}" + return 0 + fi + return 1 +} + +function version_ge() +{ + test "$(echo "$@" | tr " " "\n" | sort -rV | head -n 1)" == "$1"; +} + +WANTED="$1" +shift +ARGV=("$@") + +HADOOP_YETUS_VERSION=${HADOOP_YETUS_VERSION:-0.10.0} +BIN=$(yetus_abs "${BASH_SOURCE-$0}") +BINDIR=$(dirname "${BIN}") + +## HADOOP_YETUS_VERSION >= 0.9.0 the tarball named with apache-yetus prefix +if version_ge "${HADOOP_YETUS_VERSION}" "0.9.0"; then + YETUS_PREFIX=apache-yetus +else + YETUS_PREFIX=yetus +fi + +### +### if YETUS_HOME is set, then try to use it +### +if [[ -n "${YETUS_HOME}" && -x "${YETUS_HOME}/bin/${WANTED}" ]]; then + exec "${YETUS_HOME}/bin/${WANTED}" "${ARGV[@]}" +fi + +# +# this directory is ignored by git and maven +# +HADOOP_PATCHPROCESS=${HADOOP_PATCHPROCESS:-"${BINDIR}/../../patchprocess"} + +if [[ ! -d "${HADOOP_PATCHPROCESS}" ]]; then + mkdir -p "${HADOOP_PATCHPROCESS}" +fi + +mytmpdir=$(yetus_abs "${HADOOP_PATCHPROCESS}") +ret=$? +if [[ ${ret} != 0 ]]; then + yetus_error "yetus-dl: Unable to cwd to ${HADOOP_PATCHPROCESS}" + exit 1 +fi +HADOOP_PATCHPROCESS=${mytmpdir} + +## +## if we've already DL'd it, then short cut +## +if [[ -x "${HADOOP_PATCHPROCESS}/${YETUS_PREFIX}-${HADOOP_YETUS_VERSION}/bin/${WANTED}" ]]; then + exec "${HADOOP_PATCHPROCESS}/${YETUS_PREFIX}-${HADOOP_YETUS_VERSION}/bin/${WANTED}" "${ARGV[@]}" +fi + +## +## need to DL, etc +## + +BASEURL="https://archive.apache.org/dist/yetus/${HADOOP_YETUS_VERSION}/" +TARBALL="${YETUS_PREFIX}-${HADOOP_YETUS_VERSION}-bin.tar" + +GPGBIN=$(command -v gpg) +CURLBIN=$(command -v curl) + +if ! pushd "${HADOOP_PATCHPROCESS}" >/dev/null; then + yetus_error "ERROR: yetus-dl: Cannot pushd to ${HADOOP_PATCHPROCESS}" + exit 1 +fi + +if [[ -n "${CURLBIN}" ]]; then + if ! "${CURLBIN}" -f -s -L -O "${BASEURL}/${TARBALL}.gz"; then + yetus_error "ERROR: yetus-dl: unable to download ${BASEURL}/${TARBALL}.gz" + exit 1 + fi +else + yetus_error "ERROR: yetus-dl requires curl." + exit 1 +fi + +if [[ -n "${GPGBIN}" ]]; then + if ! 
mkdir -p .gpg; then + yetus_error "ERROR: yetus-dl: Unable to create ${HADOOP_PATCHPROCESS}/.gpg" + exit 1 + fi + if ! chmod -R 700 .gpg; then + yetus_error "ERROR: yetus-dl: Unable to chmod ${HADOOP_PATCHPROCESS}/.gpg" + exit 1 + fi + if ! "${CURLBIN}" -s -L -o KEYS_YETUS https://dist.apache.org/repos/dist/release/yetus/KEYS; then + yetus_error "ERROR: yetus-dl: unable to fetch https://dist.apache.org/repos/dist/release/yetus/KEYS" + exit 1 + fi + if ! "${CURLBIN}" -s -L -O "${BASEURL}/${TARBALL}.gz.asc"; then + yetus_error "ERROR: yetus-dl: unable to fetch ${BASEURL}/${TARBALL}.gz.asc" + exit 1 + fi + if ! "${GPGBIN}" --homedir "${HADOOP_PATCHPROCESS}/.gpg" --import "${HADOOP_PATCHPROCESS}/KEYS_YETUS" >/dev/null 2>&1; then + yetus_error "ERROR: yetus-dl: gpg unable to import ${HADOOP_PATCHPROCESS}/KEYS_YETUS" + exit 1 + fi + if ! "${GPGBIN}" --homedir "${HADOOP_PATCHPROCESS}/.gpg" --verify "${TARBALL}.gz.asc" >/dev/null 2>&1; then + yetus_error "ERROR: yetus-dl: gpg verify of tarball in ${HADOOP_PATCHPROCESS} failed" + exit 1 + fi +fi + +if ! (gunzip -c "${TARBALL}.gz" | tar xpf -); then + yetus_error "ERROR: ${TARBALL}.gz is corrupt. Investigate and then remove ${HADOOP_PATCHPROCESS} to try again." + exit 1 +fi + +if [[ -x "${HADOOP_PATCHPROCESS}/${YETUS_PREFIX}-${HADOOP_YETUS_VERSION}/bin/${WANTED}" ]]; then + popd >/dev/null + exec "${HADOOP_PATCHPROCESS}/${YETUS_PREFIX}-${HADOOP_YETUS_VERSION}/bin/${WANTED}" "${ARGV[@]}" +fi + +## +## give up +## +yetus_error "ERROR: ${WANTED} is not part of Apache Yetus ${HADOOP_YETUS_VERSION}" +exit 1 diff --git a/dev-support/byteman/README.md b/dev-support/byteman/README.md new file mode 100644 index 000000000000..9a17fc55be0d --- /dev/null +++ b/dev-support/byteman/README.md @@ -0,0 +1,31 @@ + + +This folder contains example byteman scripts (http://byteman.jboss.org/) to help +Hadoop debuging. + +As the startup script of the hadoop-runner docker image supports byteman +instrumentation it's enough to set the URL of a script to a specific environment +variable to activate it with the docker runs: + + +``` +BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm +``` + +For more info see HADOOP-15656 and HDDS-342 + diff --git a/dev-support/byteman/hadooprpc.btm b/dev-support/byteman/hadooprpc.btm new file mode 100644 index 000000000000..13894fe4ab01 --- /dev/null +++ b/dev-support/byteman/hadooprpc.btm @@ -0,0 +1,44 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# This script instruments hadoop rpc layer to print out all the request/response messages to the standard output. 
+# + +RULE Hadoop RPC request +INTERFACE ^com.google.protobuf.BlockingService +METHOD callBlockingMethod +IF true +DO traceln("--> RPC message request: " + $3.getClass().getSimpleName() + " from " + linked(Thread.currentThread(), "source")); + traceln($3.toString()) +ENDRULE + + +RULE Hadoop RPC response +INTERFACE ^com.google.protobuf.BlockingService +METHOD callBlockingMethod +AT EXIT +IF true +DO traceln("--> RPC message response: " + $3.getClass().getSimpleName() + " to " + unlink(Thread.currentThread(), "source")); + traceln($!.toString()) +ENDRULE + + +RULE Hadoop RPC source IP +CLASS org.apache.hadoop.ipc.Server$RpcCall +METHOD run +IF true +DO link(Thread.currentThread(), "source", $0.connection.toString()) +ENDRULE diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java index b31da05bb507..04a8a1aaa1db 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java @@ -158,8 +158,10 @@ private void connectToDatanode(DatanodeDetails dn, String encodedToken) // Add credential context to the client call String userName = UserGroupInformation.getCurrentUser().getShortUserName(); - LOG.debug("Nodes in pipeline : {}", pipeline.getNodes().toString()); - LOG.debug("Connecting to server : {}", dn.getIpAddress()); + if (LOG.isDebugEnabled()) { + LOG.debug("Nodes in pipeline : {}", pipeline.getNodes().toString()); + LOG.debug("Connecting to server : {}", dn.getIpAddress()); + } NettyChannelBuilder channelBuilder = NettyChannelBuilder.forAddress(dn.getIpAddress(), port).usePlaintext() .maxInboundMessageSize(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE) @@ -283,7 +285,9 @@ private XceiverClientReply sendCommandWithRetry( } for (DatanodeDetails dn : datanodeList) { try { - LOG.debug("Executing command " + request + " on datanode " + dn); + if (LOG.isDebugEnabled()) { + LOG.debug("Executing command " + request + " on datanode " + dn); + } // In case the command gets retried on a 2nd datanode, // sendCommandAsyncCall will create a new channel and async stub // in case these don't exist for the specific datanode. 
@@ -377,9 +381,10 @@ private XceiverClientReply sendCommandAsync( if (!isConnected(channel)) { reconnect(dn, token); } - - LOG.debug("Send command {} to datanode {}", request.getCmdType().toString(), - dn.getNetworkFullPath()); + if (LOG.isDebugEnabled()) { + LOG.debug("Send command {} to datanode {}", + request.getCmdType().toString(), dn.getNetworkFullPath()); + } final CompletableFuture replyFuture = new CompletableFuture<>(); semaphore.acquire(); diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java index ebed288aa52f..b15828a15309 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java @@ -36,15 +36,18 @@ import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneSecurityUtil; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.Closeable; import java.io.IOException; +import java.nio.ByteBuffer; import java.security.cert.CertificateException; import java.security.cert.X509Certificate; import java.util.concurrent.Callable; import java.util.concurrent.TimeUnit; +import java.util.function.Function; import static java.util.concurrent.TimeUnit.MILLISECONDS; import static org.apache.hadoop.hdds.conf.ConfigTag.OZONE; @@ -75,7 +78,9 @@ public class XceiverClientManager implements Closeable { private boolean isSecurityEnabled; private final boolean topologyAwareRead; /** - * Creates a new XceiverClientManager. + * Creates a new XceiverClientManager for non secured ozone cluster. + * For security enabled ozone cluster, client should use the other constructor + * with a valid ca certificate in pem string format. * * @param conf configuration */ @@ -307,6 +312,10 @@ public HddsProtos.ReplicationType getType() { return HddsProtos.ReplicationType.STAND_ALONE; } + public Function byteBufferToByteStringConversion(){ + return ByteStringConversion.createByteBufferConversion(conf); + } + /** * Get xceiver client metric. 
*/ diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java index d234a3f40854..04fababf5044 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java @@ -41,6 +41,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.ratis.ContainerCommandRequestMessage; import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.security.x509.SecurityConfig; @@ -56,7 +57,6 @@ import org.apache.ratis.retry.RetryPolicy; import org.apache.ratis.rpc.RpcType; import org.apache.ratis.rpc.SupportedRpcType; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.apache.ratis.thirdparty.com.google.protobuf.InvalidProtocolBufferException; import org.apache.ratis.util.TimeDuration; import org.slf4j.Logger; @@ -170,8 +170,10 @@ public Pipeline getPipeline() { @Override public void connect() throws Exception { - LOG.debug("Connecting to pipeline:{} datanode:{}", getPipeline().getId(), - RatisHelper.toRaftPeerId(pipeline.getFirstNode())); + if (LOG.isDebugEnabled()) { + LOG.debug("Connecting to pipeline:{} datanode:{}", getPipeline().getId(), + RatisHelper.toRaftPeerId(pipeline.getFirstNode())); + } // TODO : XceiverClient ratis should pass the config value of // maxOutstandingRequests so as to set the upper bound on max no of async // requests to be handled by raft client @@ -219,39 +221,20 @@ private CompletableFuture sendRequestAsync( try (Scope scope = GlobalTracer.get() .buildSpan("XceiverClientRatis." + request.getCmdType().name()) .startActive(true)) { - ContainerCommandRequestProto finalPayload = - ContainerCommandRequestProto.newBuilder(request) - .setTraceID(TracingUtil.exportCurrentSpan()) - .build(); - boolean isReadOnlyRequest = HddsUtils.isReadOnly(finalPayload); - ByteString byteString = finalPayload.toByteString(); - if (LOG.isDebugEnabled()) { - LOG.debug("sendCommandAsync {} {}", isReadOnlyRequest, - sanitizeForDebug(finalPayload)); + final ContainerCommandRequestMessage message + = ContainerCommandRequestMessage.toMessage( + request, TracingUtil.exportCurrentSpan()); + if (HddsUtils.isReadOnly(request)) { + if (LOG.isDebugEnabled()) { + LOG.debug("sendCommandAsync ReadOnly {}", message); + } + return getClient().sendReadOnlyAsync(message); + } else { + if (LOG.isDebugEnabled()) { + LOG.debug("sendCommandAsync {}", message); + } + return getClient().sendAsync(message); } - return isReadOnlyRequest ? 
- getClient().sendReadOnlyAsync(() -> byteString) : - getClient().sendAsync(() -> byteString); - } - } - - private ContainerCommandRequestProto sanitizeForDebug( - ContainerCommandRequestProto request) { - switch (request.getCmdType()) { - case PutSmallFile: - return request.toBuilder() - .setPutSmallFile(request.getPutSmallFile().toBuilder() - .clearData() - ) - .build(); - case WriteChunk: - return request.toBuilder() - .setWriteChunk(request.getWriteChunk().toBuilder() - .clearData() - ) - .build(); - default: - return request; } } @@ -281,7 +264,9 @@ public XceiverClientReply watchForCommit(long index, long timeout) clientReply.setLogIndex(commitIndex); return clientReply; } - LOG.debug("commit index : {} watch timeout : {}", index, timeout); + if (LOG.isDebugEnabled()) { + LOG.debug("commit index : {} watch timeout : {}", index, timeout); + } RaftClientReply reply; try { CompletableFuture replyFuture = getClient() @@ -333,10 +318,12 @@ public XceiverClientReply sendCommandAsync( metrics.incrPendingContainerOpsMetrics(request.getCmdType()); CompletableFuture containerCommandResponse = raftClientReply.whenComplete((reply, e) -> { - LOG.debug("received reply {} for request: cmdType={} containerID={}" - + " pipelineID={} traceID={} exception: {}", reply, - request.getCmdType(), request.getContainerID(), - request.getPipelineID(), request.getTraceID(), e); + if (LOG.isDebugEnabled()) { + LOG.debug("received reply {} for request: cmdType={} containerID={}" + + " pipelineID={} traceID={} exception: {}", reply, + request.getCmdType(), request.getContainerID(), + request.getPipelineID(), request.getTraceID(), e); + } metrics.decrPendingContainerOpsMetrics(request.getCmdType()); metrics.addContainerOpsLatency(request.getCmdType(), Time.monotonicNowNanos() - requestTime); diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java index c97354f5a64c..93b09d9ce591 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java @@ -165,8 +165,10 @@ private void createPipeline(XceiverClientSpi client, Pipeline pipeline) // TODO : Should we change the state on the client side ?? // That makes sense, but it is not needed for the client to work. - LOG.debug("Pipeline creation successful. Pipeline: {}", - pipeline.toString()); + if (LOG.isDebugEnabled()) { + LOG.debug("Pipeline creation successful. Pipeline: {}", + pipeline.toString()); + } } @Override @@ -210,6 +212,22 @@ public List queryNode(HddsProtos.NodeState poolName); } + @Override + public void decommissionNodes(List hosts) throws IOException { + storageContainerLocationClient.decommissionNodes(hosts); + } + + @Override + public void recommissionNodes(List hosts) throws IOException { + storageContainerLocationClient.recommissionNodes(hosts); + } + + @Override + public void startMaintenanceNodes(List hosts, int endHours) + throws IOException { + storageContainerLocationClient.startMaintenanceNodes(hosts, endHours); + } + /** * Creates a specified replication pipeline. 
*/ @@ -379,7 +397,9 @@ public void closeContainer(long containerId, Pipeline pipeline) throws IOException { XceiverClientSpi client = null; try { - LOG.debug("Close container {}", pipeline); + if (LOG.isDebugEnabled()) { + LOG.debug("Close container {}", pipeline); + } /* TODO: two orders here, revisit this later: 1. close on SCM first, then on data node diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java index 35807f498420..40bbd93b16f1 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java @@ -155,8 +155,10 @@ protected List getChunkInfos() throws IOException { boolean success = false; List chunks; try { - LOG.debug("Initializing BlockInputStream for get key to access {}", - blockID.getContainerID()); + if (LOG.isDebugEnabled()) { + LOG.debug("Initializing BlockInputStream for get key to access {}", + blockID.getContainerID()); + } if (token != null) { UserGroupInformation.getCurrentUser().addToken(token); diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java index 88d178c1c07f..b15ca3f6c85f 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java @@ -21,7 +21,6 @@ import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.scm.ByteStringHelper; import org.apache.hadoop.hdds.scm.XceiverClientReply; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; @@ -392,22 +391,26 @@ ContainerCommandResponseProto> executePutBlock() .equals(responseBlockID.getContainerBlockID())); // updates the bcsId of the block blockID = responseBlockID; - LOG.debug( - "Adding index " + asyncReply.getLogIndex() + " commitMap size " - + commitWatcher.getCommitInfoMapSize() + " flushLength " - + flushPos + " numBuffers " + byteBufferList.size() - + " blockID " + blockID + " bufferPool size" + bufferPool - .getSize() + " currentBufferIndex " + bufferPool - .getCurrentBufferIndex()); + if (LOG.isDebugEnabled()) { + LOG.debug( + "Adding index " + asyncReply.getLogIndex() + " commitMap size " + + commitWatcher.getCommitInfoMapSize() + " flushLength " + + flushPos + " numBuffers " + byteBufferList.size() + + " blockID " + blockID + " bufferPool size" + bufferPool + .getSize() + " currentBufferIndex " + bufferPool + .getCurrentBufferIndex()); + } // for standalone protocol, logIndex will always be 0. 
commitWatcher .updateCommitInfoMap(asyncReply.getLogIndex(), byteBufferList); } return e; }, responseExecutor).exceptionally(e -> { - LOG.debug( - "putBlock failed for blockID " + blockID + " with exception " + e - .getLocalizedMessage()); + if (LOG.isDebugEnabled()) { + LOG.debug( + "putBlock failed for blockID " + blockID + " with exception " + e + .getLocalizedMessage()); + } CompletionException ce = new CompletionException(e); setIoException(ce); throw ce; @@ -586,7 +589,7 @@ public boolean isClosed() { */ private void writeChunkToContainer(ByteBuffer chunk) throws IOException { int effectiveChunkSize = chunk.remaining(); - ByteString data = ByteStringHelper.getByteString(chunk); + ByteString data = bufferPool.byteStringConversion().apply(chunk); Checksum checksum = new Checksum(checksumType, bytesPerChecksum); ChecksumData checksumData = checksum.computeChecksum(chunk); ChunkInfo chunkInfo = ChunkInfo.newBuilder() @@ -609,9 +612,11 @@ private void writeChunkToContainer(ByteBuffer chunk) throws IOException { } return e; }, responseExecutor).exceptionally(e -> { - LOG.debug( - "writing chunk failed " + chunkInfo.getChunkName() + " blockID " - + blockID + " with exception " + e.getLocalizedMessage()); + if (LOG.isDebugEnabled()) { + LOG.debug( + "writing chunk failed " + chunkInfo.getChunkName() + " blockID " + + blockID + " with exception " + e.getLocalizedMessage()); + } CompletionException ce = new CompletionException(e); setIoException(ce); throw ce; @@ -620,9 +625,11 @@ private void writeChunkToContainer(ByteBuffer chunk) throws IOException { throw new IOException( "Unexpected Storage Container Exception: " + e.toString(), e); } - LOG.debug( - "writing chunk " + chunkInfo.getChunkName() + " blockID " + blockID - + " length " + effectiveChunkSize); + if (LOG.isDebugEnabled()) { + LOG.debug( + "writing chunk " + chunkInfo.getChunkName() + " blockID " + blockID + + " length " + effectiveChunkSize); + } containerBlockData.addChunks(chunkInfo); } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java index 17788c70a6c6..6d534579c860 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java @@ -19,10 +19,13 @@ package org.apache.hadoop.hdds.scm.storage; import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.scm.ByteStringConversion; +import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; +import java.util.function.Function; /** * This class creates and manages pool of n buffers. 
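
Editor's note: the BlockOutputStream change above swaps the removed static ByteStringHelper for a conversion function obtained from the BufferPool, and the next hunk adds that plumbing to BufferPool; the ByteStringConversion helper introduced later in this patch builds the function from configuration. Below is a minimal usage sketch, not part of the patch; the OzoneConfiguration instance and the buffer size/capacity values are illustrative assumptions.

    import java.nio.ByteBuffer;
    import java.util.function.Function;

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.scm.ByteStringConversion;
    import org.apache.hadoop.hdds.scm.storage.BufferPool;
    import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;

    // Illustrative sketch: build the ByteBuffer -> ByteString conversion from
    // configuration and hand it to a BufferPool, in the spirit of the changes
    // to XceiverClientManager and BlockOutputStream in this patch.
    public final class ByteStringConversionSketch {
      private ByteStringConversionSketch() {
      }

      public static ByteString toByteString(ByteBuffer chunk) {
        OzoneConfiguration conf = new OzoneConfiguration();
        // Honours ozone.UnsafeByteOperations.enabled: unsafe wrap vs. copy.
        Function<ByteBuffer, ByteString> conversion =
            ByteStringConversion.createByteBufferConversion(conf);

        // Buffer size and capacity are arbitrary example values.
        BufferPool pool = new BufferPool(4 * 1024 * 1024, 16, conversion);
        return pool.byteStringConversion().apply(chunk);
      }
    }
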
@@ -33,12 +36,24 @@ public class BufferPool { private int currentBufferIndex; private final int bufferSize; private final int capacity; + private final Function byteStringConversion; public BufferPool(int bufferSize, int capacity) { + this(bufferSize, capacity, + ByteStringConversion.createByteBufferConversion(null)); + } + + public BufferPool(int bufferSize, int capacity, + Function byteStringConversion){ this.capacity = capacity; this.bufferSize = bufferSize; bufferList = new ArrayList<>(capacity); currentBufferIndex = -1; + this.byteStringConversion = byteStringConversion; + } + + public Function byteStringConversion(){ + return byteStringConversion; } public ByteBuffer getCurrentBuffer() { diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/CommitWatcher.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/CommitWatcher.java index d4606b514c46..1d9d55bfbfbb 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/CommitWatcher.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/CommitWatcher.java @@ -131,7 +131,9 @@ public XceiverClientReply watchOnFirstIndex() throws IOException { long index = commitIndex2flushedDataMap.keySet().stream().mapToLong(v -> v).min() .getAsLong(); - LOG.debug("waiting for first index " + index + " to catch up"); + if (LOG.isDebugEnabled()) { + LOG.debug("waiting for first index " + index + " to catch up"); + } return watchForCommit(index); } else { return null; @@ -153,7 +155,9 @@ public XceiverClientReply watchOnLastIndex() long index = commitIndex2flushedDataMap.keySet().stream().mapToLong(v -> v).max() .getAsLong(); - LOG.debug("waiting for last flush Index " + index + " to catch up"); + if (LOG.isDebugEnabled()) { + LOG.debug("waiting for last flush Index " + index + " to catch up"); + } return watchForCommit(index); } else { return null; diff --git a/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml b/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml index c7db6794cc0e..4441b69d8683 100644 --- a/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml +++ b/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml @@ -25,4 +25,9 @@ + + + + + diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java index 6ff166a59ff8..d7b20fdd9172 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java @@ -423,8 +423,10 @@ public static ObjectName registerWithJmxProperties( InvocationTargetException e) { // Fallback - LOG.trace("Registering MBean {} without additional properties {}", - mBeanName, jmxProperties); + if (LOG.isTraceEnabled()) { + LOG.trace("Registering MBean {} without additional properties {}", + mBeanName, jmxProperties); + } return MBeans.register(serviceName, mBeanName, mBean); } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java index 712e8d37e046..8beac1663b2b 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java @@ -171,7 +171,9 @@ private void processConfigTagRequest(HttpServletRequest request, Properties properties = config.getAllPropertiesByTag(tag); propMap.put(tag, properties); } else 
{ - LOG.debug("Not a valid tag" + tag); + if (LOG.isDebugEnabled()) { + LOG.debug("Not a valid tag" + tag); + } } } out.write(gson.toJsonTree(propMap).toString()); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/ContainerCommandRequestMessage.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/ContainerCommandRequestMessage.java new file mode 100644 index 000000000000..07a886a0f9c0 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/ContainerCommandRequestMessage.java @@ -0,0 +1,107 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.ratis; + +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.PutSmallFileRequestProto; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.WriteChunkRequestProto; +import org.apache.ratis.protocol.Message; +import org.apache.ratis.protocol.RaftGroupId; +import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.apache.ratis.thirdparty.com.google.protobuf.InvalidProtocolBufferException; +import org.apache.ratis.util.JavaUtils; + +import java.util.Objects; +import java.util.function.Supplier; + +/** + * Implementing the {@link Message} interface + * for {@link ContainerCommandRequestProto}. + */ +public final class ContainerCommandRequestMessage implements Message { + public static ContainerCommandRequestMessage toMessage( + ContainerCommandRequestProto request, String traceId) { + final ContainerCommandRequestProto.Builder b + = ContainerCommandRequestProto.newBuilder(request); + if (traceId != null) { + b.setTraceID(traceId); + } + + ByteString data = ByteString.EMPTY; + if (request.getCmdType() == Type.WriteChunk) { + final WriteChunkRequestProto w = request.getWriteChunk(); + data = w.getData(); + b.setWriteChunk(w.toBuilder().clearData()); + } else if (request.getCmdType() == Type.PutSmallFile) { + final PutSmallFileRequestProto p = request.getPutSmallFile(); + data = p.getData(); + b.setPutSmallFile(p.toBuilder().setData(ByteString.EMPTY)); + } + return new ContainerCommandRequestMessage(b.build(), data); + } + + public static ContainerCommandRequestProto toProto( + ByteString bytes, RaftGroupId groupId) + throws InvalidProtocolBufferException { + final int i = 4 + bytes.asReadOnlyByteBuffer().getInt(); + final ContainerCommandRequestProto header + = ContainerCommandRequestProto.parseFrom(bytes.substring(4, i)); + // TODO: setting pipeline id can be avoided if the client is sending it. + // In such case, just have to validate the pipeline id. 
+ final ContainerCommandRequestProto.Builder b = header.toBuilder(); + if (groupId != null) { + b.setPipelineID(groupId.getUuid().toString()); + } + final ByteString data = bytes.substring(i); + if (header.getCmdType() == Type.WriteChunk) { + b.setWriteChunk(b.getWriteChunkBuilder().setData(data)); + } else if (header.getCmdType() == Type.PutSmallFile) { + b.setPutSmallFile(b.getPutSmallFileBuilder().setData(data)); + } + return b.build(); + } + + private final ContainerCommandRequestProto header; + private final ByteString data; + private final Supplier contentSupplier + = JavaUtils.memoize(this::buildContent); + + private ContainerCommandRequestMessage( + ContainerCommandRequestProto header, ByteString data) { + this.header = Objects.requireNonNull(header, "header == null"); + this.data = Objects.requireNonNull(data, "data == null"); + } + + private ByteString buildContent() { + final ByteString headerBytes = header.toByteString(); + return RatisHelper.int2ByteString(headerBytes.size()) + .concat(headerBytes) + .concat(data); + } + + @Override + public ByteString getContent() { + return contentSupplier.get(); + } + + @Override + public String toString() { + return header + ", data.size=" + data.size(); + } +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java index 3ad4e2e7a2b4..081b4fb766be 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdds.ratis; +import java.io.DataOutputStream; import java.io.IOException; import java.security.cert.CertificateException; import java.security.cert.X509Certificate; @@ -182,7 +183,10 @@ static RaftClient newRaftClient(RpcType rpcType, RaftPeer leader, static RaftClient newRaftClient(RpcType rpcType, RaftPeerId leader, RaftGroup group, RetryPolicy retryPolicy, int maxOutStandingRequest, GrpcTlsConfig tlsConfig, TimeDuration clientRequestTimeout) { - LOG.trace("newRaftClient: {}, leader={}, group={}", rpcType, leader, group); + if (LOG.isTraceEnabled()) { + LOG.trace("newRaftClient: {}, leader={}, group={}", + rpcType, leader, group); + } final RaftProperties properties = new RaftProperties(); RaftConfigKeys.Rpc.setType(properties, rpcType); RaftClientConfigKeys.Rpc @@ -272,4 +276,15 @@ static Long getMinReplicatedIndex( return commitInfos.stream().map(RaftProtos.CommitInfoProto::getCommitIndex) .min(Long::compareTo).orElse(null); } + + static ByteString int2ByteString(int n) { + final ByteString.Output out = ByteString.newOutput(); + try(DataOutputStream dataOut = new DataOutputStream(out)) { + dataOut.writeInt(n); + } catch (IOException e) { + throw new IllegalStateException( + "Failed to write integer n = " + n + " to a ByteString.", e); + } + return out.toByteString(); + } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java new file mode 100644 index 000000000000..4608df761228 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.apache.ratis.thirdparty.com.google.protobuf.UnsafeByteOperations; + +import java.nio.ByteBuffer; +import java.util.function.Function; + +/** + * Helper class to create a conversion function from ByteBuffer to ByteString + * based on the property + * {@link OzoneConfigKeys#OZONE_UNSAFEBYTEOPERATIONS_ENABLED} in the + * Ozone configuration. + */ +public final class ByteStringConversion { + private ByteStringConversion(){} // no instantiation. + + /** + * Creates the conversion function to be used to convert ByteBuffers to + * ByteString instances to be used in protobuf messages. + * + * @param config the Ozone configuration + * @return the conversion function defined by + * {@link OzoneConfigKeys#OZONE_UNSAFEBYTEOPERATIONS_ENABLED} + * @see
ByteBuffer
+ */ + public static Function createByteBufferConversion( + Configuration config){ + boolean unsafeEnabled = + config!=null && config.getBoolean( + OzoneConfigKeys.OZONE_UNSAFEBYTEOPERATIONS_ENABLED, + OzoneConfigKeys.OZONE_UNSAFEBYTEOPERATIONS_ENABLED_DEFAULT); + if (unsafeEnabled) { + return buffer -> UnsafeByteOperations.unsafeWrap(buffer); + } else { + return buffer -> { + ByteString retval = ByteString.copyFrom(buffer); + buffer.flip(); + return retval; + }; + } + } +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringHelper.java deleted file mode 100644 index ccdf4fac4249..000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringHelper.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm; - -import com.google.common.base.Preconditions; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.apache.ratis.thirdparty.com.google.protobuf.UnsafeByteOperations; - -import java.nio.ByteBuffer; -import java.util.concurrent.atomic.AtomicBoolean; -/** - * Helper class to perform Unsafe ByteString conversion from byteBuffer or byte - * array depending on the config "ozone.UnsafeByteOperations.enabled". - */ -public final class ByteStringHelper { - private static final AtomicBoolean INITIALIZED = new AtomicBoolean(); - private static volatile boolean isUnsafeByteOperationsEnabled; - - /** - * There is no need to instantiate this class. - */ - private ByteStringHelper() { - } - - public static void init(boolean isUnsafeByteOperation) { - final boolean set = INITIALIZED.compareAndSet(false, true); - if (set) { - ByteStringHelper.isUnsafeByteOperationsEnabled = - isUnsafeByteOperation; - } else { - // already initialized, check values - Preconditions.checkState(isUnsafeByteOperationsEnabled - == isUnsafeByteOperation); - } - } - - private static ByteString copyFrom(ByteBuffer buffer) { - final ByteString bytes = ByteString.copyFrom(buffer); - // flip the buffer so as to read the data starting from pos 0 again - buffer.flip(); - return bytes; - } - - public static ByteString getByteString(ByteBuffer buffer) { - return isUnsafeByteOperationsEnabled ? - UnsafeByteOperations.unsafeWrap(buffer) : copyFrom(buffer); - } - - public static ByteString getByteString(byte[] bytes) { - return isUnsafeByteOperationsEnabled ? 
- UnsafeByteOperations.unsafeWrap(bytes) : ByteString.copyFrom(bytes); - } - -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java index 226ceda9255a..8238e8469a9f 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java @@ -161,6 +161,35 @@ ContainerWithPipeline createContainer(HddsProtos.ReplicationType type, List queryNode(HddsProtos.NodeState nodeStatuses, HddsProtos.QueryScope queryScope, String poolName) throws IOException; + /** + * Allows a list of hosts to be decommissioned. The hosts are identified + * by their hostname and optionally port in the format foo.com:port. + * @param hosts A list of hostnames, optionally with port + * @throws IOException + */ + void decommissionNodes(List hosts) throws IOException; + + /** + * Allows a list of hosts in maintenance or decommission states to be placed + * back in service. The hosts are identified by their hostname and optionally + * port in the format foo.com:port. + * @param hosts A list of hostnames, optionally with port + * @throws IOException + */ + void recommissionNodes(List hosts) throws IOException; + + /** + * Place the list of datanodes into maintenance mode. If a non-null endDtm + * is passed, the hosts will automatically exit maintenance mode after the + * given time has passed. The hosts are identified by their hostname and + * optionally port in the format foo.com:port. + * @param hosts A list of hostnames, optionally with port + * @param endHours The number of hours from now which maintenance will end + * @throws IOException + */ + void startMaintenanceNodes(List hosts, int endHours) + throws IOException; + /** * Creates a specified replication pipeline. 
* @param type - Type diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java index fe479ba43829..5c58e92d3c5d 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java @@ -54,7 +54,7 @@ public class ContainerInfo implements Comparator, mapper.setVisibility(PropertyAccessor.FIELD, JsonAutoDetect.Visibility.ANY); mapper .setVisibility(PropertyAccessor.GETTER, JsonAutoDetect.Visibility.NONE); - WRITER = mapper.writer(); + WRITER = mapper.writerWithDefaultPrettyPrinter(); } private HddsProtos.LifeCycleState state; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java index 0e9afd80292c..579e5f71c791 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java @@ -109,7 +109,9 @@ public void add(Node node) { if (add) { LOG.info("Added a new node: " + node.getNetworkFullPath()); - LOG.debug("NetworkTopology became:\n{}", this); + if (LOG.isDebugEnabled()) { + LOG.debug("NetworkTopology became:\n{}", this); + } } } @@ -131,7 +133,9 @@ public void remove(Node node) { netlock.writeLock().unlock(); } LOG.info("Removed a node: " + node.getNetworkFullPath()); - LOG.debug("NetworkTopology became:\n{}", this); + if (LOG.isDebugEnabled()) { + LOG.debug("NetworkTopology became:\n{}", this); + } } /** @@ -558,11 +562,14 @@ private Node chooseNodeInternal(String scope, int leafIndex, ret = ((InnerNode)scopeNode).getLeaf(nodeIndex, mutableExcludedScopes, mutableExNodes, ancestorGen); } - LOG.debug("Choosing node[index={},random={}] from \"{}\" available nodes" + - " scope=\"{}\", excludedScope=\"{}\", excludeNodes=\"{}\".", - nodeIndex, (leafIndex == -1 ? "true" : "false"), availableNodes, - scopeNode.getNetworkFullPath(), excludedScopes, excludedNodes); - LOG.debug("Chosen node = {}", (ret == null ? "not found" : ret.toString())); + if (LOG.isDebugEnabled()) { + LOG.debug("Choosing node[index={},random={}] from \"{}\" available " + + "nodes, scope=\"{}\", excludedScope=\"{}\", excludeNodes=\"{}\".", + nodeIndex, (leafIndex == -1 ? "true" : "false"), availableNodes, + scopeNode.getNetworkFullPath(), excludedScopes, excludedNodes); + LOG.debug("Chosen node = {}", (ret == null ? 
"not found" : + ret.toString())); + } return ret; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java index 1627569b1a0e..2828f6ea41ca 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java @@ -41,8 +41,7 @@ */ public final class Pipeline { - private static final Logger LOG = LoggerFactory - .getLogger(Pipeline.class); + private static final Logger LOG = LoggerFactory.getLogger(Pipeline.class); private final PipelineID id; private final ReplicationType type; private final ReplicationFactor factor; @@ -192,8 +191,10 @@ public HddsProtos.Pipeline getProtobufMessage() } } } - LOG.debug("Serialize pipeline {} with nodesInOrder{ }", id.toString(), - nodes); + if (LOG.isDebugEnabled()) { + LOG.debug("Serialize pipeline {} with nodesInOrder{ }", id.toString(), + nodes); + } } return builder.build(); } @@ -339,8 +340,10 @@ public Pipeline build() { nodeIndex--; } } - LOG.debug("Deserialize nodesInOrder {} in pipeline {}", nodesWithOrder, - id.toString()); + if (LOG.isDebugEnabled()) { + LOG.debug("Deserialize nodesInOrder {} in pipeline {}", + nodesWithOrder, id.toString()); + } pipeline.setNodesInOrder(nodesWithOrder); } else if (nodesInOrder != null){ // This branch is for pipeline clone diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java index 88db8205a408..91cd40dc3f5a 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java @@ -112,6 +112,13 @@ List listContainer(long startContainerID, int count) List queryNode(HddsProtos.NodeState state, HddsProtos.QueryScope queryScope, String poolName) throws IOException; + void decommissionNodes(List nodes) throws IOException; + + void recommissionNodes(List nodes) throws IOException; + + void startMaintenanceNodes(List nodes, int endInHours) + throws IOException; + /** * Notify from client when begin or finish creating objects like pipeline * or containers on datanodes. 
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java index 01db597dfae1..99941d1c3346 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java @@ -53,6 +53,9 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ScmContainerLocationResponse; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartReplicationManagerRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StopReplicationManagerRequestProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartMaintenanceNodesRequestProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DecommissionNodesRequestProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.RecommissionNodesRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.Type; import org.apache.hadoop.hdds.scm.ScmInfo; import org.apache.hadoop.hdds.scm.container.ContainerInfo; @@ -256,6 +259,61 @@ public List queryNode(HddsProtos.NodeState } + /** + * Attempts to decommission the list of nodes. + * @param nodes The list of hostnames or hostname:ports to decommission + * @throws IOException + */ + @Override + public void decommissionNodes(List nodes) throws IOException { + Preconditions.checkNotNull(nodes); + DecommissionNodesRequestProto request = + DecommissionNodesRequestProto.newBuilder() + .addAllHosts(nodes) + .build(); + submitRequest(Type.DecommissionNodes, + builder -> builder.setDecommissionNodesRequest(request)); + } + + /** + * Attempts to recommission the list of nodes. + * @param nodes The list of hostnames or hostname:ports to recommission + * @throws IOException + */ + @Override + public void recommissionNodes(List nodes) throws IOException { + Preconditions.checkNotNull(nodes); + RecommissionNodesRequestProto request = + RecommissionNodesRequestProto.newBuilder() + .addAllHosts(nodes) + .build(); + submitRequest(Type.RecommissionNodes, + builder -> builder.setRecommissionNodesRequest(request)); + } + + /** + * Attempts to put the list of nodes into maintenance mode. + * + * @param nodes The list of hostnames or hostname:ports to put into + * maintenance + * @param endInHours A number of hours from now where the nodes will be taken + * out of maintenance automatically. Passing zero will + * allow the nodes to stay in maintenance indefinitely + * @throws IOException + */ + @Override + public void startMaintenanceNodes(List nodes, int endInHours) + throws IOException { + Preconditions.checkNotNull(nodes); + StartMaintenanceNodesRequestProto request = + StartMaintenanceNodesRequestProto.newBuilder() + .addAllHosts(nodes) + .setEndInHours(endInHours) + .build(); + submitRequest(Type.StartMaintenanceNodes, + builder -> builder.setStartMaintenanceNodesRequest(request)); + } + /** * Notify from client that creates object on datanodes. 
* diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/BlockTokenVerifier.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/BlockTokenVerifier.java index 2742acec5c30..e94808ac9d7d 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/BlockTokenVerifier.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/BlockTokenVerifier.java @@ -68,7 +68,9 @@ public UserGroupInformation verify(String user, String tokenStr) OzoneBlockTokenIdentifier tokenId = new OzoneBlockTokenIdentifier(); try { token.decodeFromUrlString(tokenStr); - LOGGER.debug("Verifying token:{} for user:{} ", token, user); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Verifying token:{} for user:{} ", token, user); + } ByteArrayInputStream buf = new ByteArrayInputStream( token.getIdentifier()); DataInputStream in = new DataInputStream(buf); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenSelector.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenSelector.java index 83797c3c9607..9acc75ae1707 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenSelector.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenSelector.java @@ -47,7 +47,9 @@ public Token selectToken(Text service, for (Token token : tokens) { if (OzoneBlockTokenIdentifier.KIND_NAME.equals(token.getKind()) && token.getService().equals(service)) { - LOG.trace("Getting token for service:{}", service); + if (LOG.isTraceEnabled()) { + LOG.trace("Getting token for service:{}", service); + } return (Token) token; } } @@ -66,7 +68,9 @@ public static Token selectBlockToken(Text service, for (Token token : tokens) { if (OzoneBlockTokenIdentifier.KIND_NAME.equals(token.getKind()) && token.getService().equals(service)) { - LOG.trace("Getting token for service:{}", service); + if (LOG.isTraceEnabled()) { + LOG.trace("Getting token for service:{}", service); + } return (Token) token; } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultProfile.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultProfile.java index 13319d16572b..5fdb6f7d9669 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultProfile.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultProfile.java @@ -236,7 +236,9 @@ public boolean validateGeneralName(int type, String value) { try { final InetAddress byAddress = InetAddress.getByAddress( Hex.decodeHex(value.substring(1))); - LOG.debug("Host Name/IP Address : {}", byAddress.toString()); + if (LOG.isDebugEnabled()) { + LOG.debug("Host Name/IP Address : {}", byAddress.toString()); + } return true; } catch (UnknownHostException | DecoderException e) { return false; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/HDDSKeyGenerator.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/HDDSKeyGenerator.java index ded50f9f653b..640f5ca0b946 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/HDDSKeyGenerator.java +++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/HDDSKeyGenerator.java @@ -108,8 +108,10 @@ public KeyPair generateKey(int size) throws */ public KeyPair generateKey(int size, String algorithm, String provider) throws NoSuchProviderException, NoSuchAlgorithmException { - LOG.debug("Generating key pair using size:{}, Algorithm:{}, Provider:{}", - size, algorithm, provider); + if (LOG.isDebugEnabled()) { + LOG.debug("Generating key pair using size:{}, Algorithm:{}, Provider:{}", + size, algorithm, provider); + } KeyPairGenerator generator = KeyPairGenerator .getInstance(algorithm, provider); generator.initialize(size); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/StringCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/StringCodec.java index 41ba537c85cc..56d59ea6f1a3 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/StringCodec.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/StringCodec.java @@ -45,7 +45,9 @@ public JaegerSpanContext extract(StringBuilder s) { if (value != null && !value.equals("")) { String[] parts = value.split(":"); if (parts.length != 4) { - LOG.debug("MalformedTracerStateString: {}", value); + if (LOG.isDebugEnabled()) { + LOG.debug("MalformedTracerStateString: {}", value); + } throw new MalformedTracerStateStringException(value); } else { String traceId = parts[0]; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundService.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundService.java index 88b745e0baf8..ca8d87053f7f 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundService.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundService.java @@ -102,15 +102,18 @@ public void start() { public class PeriodicalTask implements Runnable { @Override public synchronized void run() { - LOG.debug("Running background service : {}", serviceName); + if (LOG.isDebugEnabled()) { + LOG.debug("Running background service : {}", serviceName); + } BackgroundTaskQueue tasks = getTasks(); if (tasks.isEmpty()) { // No task found, or some problems to init tasks // return and retry in next interval. 
return; } - - LOG.debug("Number of background tasks to execute : {}", tasks.size()); + if (LOG.isDebugEnabled()) { + LOG.debug("Number of background tasks to execute : {}", tasks.size()); + } CompletionService taskCompletionService = new ExecutorCompletionService<>(exec); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/HddsVersionInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/HddsVersionInfo.java index 99e13cf57dee..6a372d123712 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/HddsVersionInfo.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/HddsVersionInfo.java @@ -50,7 +50,9 @@ public static void main(String[] args) { "Compiled with protoc " + HDDS_VERSION_INFO.getProtocVersion()); System.out.println( "From source with checksum " + HDDS_VERSION_INFO.getSrcChecksum()); - LOG.debug("This command was run using " + - ClassUtil.findContainingJar(HddsVersionInfo.class)); + if (LOG.isDebugEnabled()) { + LOG.debug("This command was run using " + + ClassUtil.findContainingJar(HddsVersionInfo.class)); + } } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStore.java index 858931fd2010..0598987f9b50 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStore.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStore.java @@ -77,7 +77,9 @@ public LevelDBStore(File dbPath, Options options) private void openDB(File dbPath, Options options) throws IOException { if (dbPath.getParentFile().mkdirs()) { - LOG.debug("Db path {} created.", dbPath.getParentFile()); + if (LOG.isDebugEnabled()) { + LOG.debug("Db path {} created.", dbPath.getParentFile()); + } } db = JniDBFactory.factory.open(dbPath, options); if (LOG.isDebugEnabled()) { @@ -370,17 +372,21 @@ private List> getRangeKVs(byte[] startKey, int scanned = filter.getKeysScannedNum(); int hinted = filter.getKeysHintedNum(); if (scanned > 0 || hinted > 0) { - LOG.debug( - "getRangeKVs ({}) numOfKeysScanned={}, numOfKeysHinted={}", - filter.getClass().getSimpleName(), filter.getKeysScannedNum(), - filter.getKeysHintedNum()); + if (LOG.isDebugEnabled()) { + LOG.debug( + "getRangeKVs ({}) numOfKeysScanned={}, numOfKeysHinted={}", + filter.getClass().getSimpleName(), + filter.getKeysScannedNum(), filter.getKeysHintedNum()); + } } } } long end = System.currentTimeMillis(); long timeConsumed = end - start; - LOG.debug("Time consumed for getRangeKVs() is {}ms," - + " result length is {}.", timeConsumed, result.size()); + if (LOG.isDebugEnabled()) { + LOG.debug("Time consumed for getRangeKVs() is {}ms," + + " result length is {}.", timeConsumed, result.size()); + } } } return result; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBCheckpointManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBCheckpointManager.java index 53aeab7c3abb..42b9b77d2d87 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBCheckpointManager.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBCheckpointManager.java @@ -83,7 +83,7 @@ public RocksDBCheckpoint createCheckpoint(String parentDir) { Instant end = Instant.now(); long duration = Duration.between(start, end).toMillis(); - LOG.debug("Created checkpoint at " + checkpointPath.toString() + " in " + LOG.info("Created checkpoint at " + 
checkpointPath.toString() + " in " + duration + " milliseconds"); return new RocksDBCheckpoint( diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDBCheckpoint.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDBCheckpoint.java index 5e8843a91bef..149743816c20 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDBCheckpoint.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDBCheckpoint.java @@ -76,7 +76,8 @@ public long checkpointCreationTimeTaken() { @Override public void cleanupCheckpoint() throws IOException { - LOG.debug("Cleaning up checkpoint at " + checkpointLocation.toString()); + LOG.info("Cleaning up RocksDB checkpoint at " + + checkpointLocation.toString()); FileUtils.deleteDirectory(checkpointLocation.toFile()); } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheKey.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheKey.java index aa05d88dadab..7be2921b6a11 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheKey.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheKey.java @@ -24,7 +24,7 @@ * CacheKey for the RocksDB table. * @param */ -public class CacheKey { +public class CacheKey implements Comparable { private final KEY key; @@ -53,4 +53,13 @@ public boolean equals(Object o) { public int hashCode() { return Objects.hash(key); } + + @Override + public int compareTo(Object o) { + if(Objects.equals(key, ((CacheKey)o).key)) { + return 0; + } else { + return key.toString().compareTo((((CacheKey) o).key).toString()); + } + } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/TableCacheImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/TableCacheImpl.java index c3215c475eb9..3e6999a49cfa 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/TableCacheImpl.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/TableCacheImpl.java @@ -23,6 +23,7 @@ import java.util.Map; import java.util.NavigableSet; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.ConcurrentSkipListSet; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -47,7 +48,7 @@ public class TableCacheImpl implements TableCache { - private final ConcurrentHashMap cache; + private final Map cache; private final NavigableSet> epochEntries; private ExecutorService executorService; private CacheCleanupPolicy cleanupPolicy; @@ -55,7 +56,14 @@ public class TableCacheImpl(); + + // As for full table cache only we need elements to be inserted in sorted + // manner, so that list will be easy. For other we can go with Hash map. + if (cleanupPolicy == CacheCleanupPolicy.NEVER) { + cache = new ConcurrentSkipListMap<>(); + } else { + cache = new ConcurrentHashMap<>(); + } epochEntries = new ConcurrentSkipListSet<>(); // Created a singleThreadExecutor, so one cleanup will be running at a // time. 
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java index a3d1c4ab2883..3f7d0b915d5d 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java @@ -453,6 +453,9 @@ public final class OzoneConfigKeys { "ozone.network.topology.aware.read"; public static final boolean OZONE_NETWORK_TOPOLOGY_AWARE_READ_DEFAULT = false; + public static final String OZONE_MANAGER_FAIR_LOCK = "ozone.om.lock.fair"; + public static final boolean OZONE_MANAGER_FAIR_LOCK_DEFAULT = false; + /** * There is no need to instantiate this class. */ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBuffer.java new file mode 100644 index 000000000000..7ce643db4711 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBuffer.java @@ -0,0 +1,122 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Some portions of this file Copyright (c) 2004-2006 Intel Corportation + * and licensed under the BSD license. + */ +package org.apache.hadoop.ozone.common; + +import org.apache.ratis.util.Preconditions; + +import java.nio.ByteBuffer; +import java.util.zip.Checksum; + +/** + * A sub-interface of {@link Checksum} + * with a method to update checksum from a {@link ByteBuffer}. + */ +public interface ChecksumByteBuffer extends Checksum { + /** + * Updates the current checksum with the specified bytes in the buffer. + * Upon return, the buffer's position will be equal to its limit. + * + * @param buffer the bytes to update the checksum with + */ + void update(ByteBuffer buffer); + + @Override + default void update(byte[] b, int off, int len) { + update(ByteBuffer.wrap(b, off, len).asReadOnlyBuffer()); + } + + /** + * An abstract class implementing {@link ChecksumByteBuffer} + * with a 32-bit checksum and a lookup table. + */ + @SuppressWarnings("innerassignment") + abstract class CrcIntTable implements ChecksumByteBuffer { + /** Current CRC value with bit-flipped. 
*/ + private int crc; + + CrcIntTable() { + reset(); + Preconditions.assertTrue(getTable().length == 8 * (1 << 8)); + } + + abstract int[] getTable(); + + @Override + public final long getValue() { + return (~crc) & 0xffffffffL; + } + + @Override + public final void reset() { + crc = 0xffffffff; + } + + @Override + public final void update(int b) { + crc = (crc >>> 8) ^ getTable()[(((crc ^ b) << 24) >>> 24)]; + } + + @Override + public final void update(ByteBuffer b) { + crc = update(crc, b, getTable()); + } + + private static int update(int crc, ByteBuffer b, int[] table) { + for(; b.remaining() > 7;) { + final int c0 = (b.get() ^ crc) & 0xff; + final int c1 = (b.get() ^ (crc >>>= 8)) & 0xff; + final int c2 = (b.get() ^ (crc >>>= 8)) & 0xff; + final int c3 = (b.get() ^ (crc >>> 8)) & 0xff; + crc = (table[0x700 + c0] ^ table[0x600 + c1]) + ^ (table[0x500 + c2] ^ table[0x400 + c3]); + + final int c4 = b.get() & 0xff; + final int c5 = b.get() & 0xff; + final int c6 = b.get() & 0xff; + final int c7 = b.get() & 0xff; + + crc ^= (table[0x300 + c4] ^ table[0x200 + c5]) + ^ (table[0x100 + c6] ^ table[c7]); + } + + // loop unroll - duff's device style + switch (b.remaining()) { + case 7: + crc = (crc >>> 8) ^ table[((crc ^ b.get()) & 0xff)]; + case 6: + crc = (crc >>> 8) ^ table[((crc ^ b.get()) & 0xff)]; + case 5: + crc = (crc >>> 8) ^ table[((crc ^ b.get()) & 0xff)]; + case 4: + crc = (crc >>> 8) ^ table[((crc ^ b.get()) & 0xff)]; + case 3: + crc = (crc >>> 8) ^ table[((crc ^ b.get()) & 0xff)]; + case 2: + crc = (crc >>> 8) ^ table[((crc ^ b.get()) & 0xff)]; + case 1: + crc = (crc >>> 8) ^ table[((crc ^ b.get()) & 0xff)]; + default: // noop + } + + return crc; + } + } +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/PureJavaCrc32ByteBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/PureJavaCrc32ByteBuffer.java new file mode 100644 index 000000000000..0d1f6307501a --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/PureJavaCrc32ByteBuffer.java @@ -0,0 +1,556 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.common; + +/** + * Similar to {@link org.apache.hadoop.util.PureJavaCrc32} + * except that this class implement {@link ChecksumByteBuffer}. + */ +final class PureJavaCrc32ByteBuffer extends ChecksumByteBuffer.CrcIntTable { + @Override + int[] getTable() { + return T; + } + + /** + * CRC-32 lookup table generated by the polynomial 0xEDB88320. + * See also org.apache.hadoop.util.TestPureJavaCrc32.Table. 
+ */ + private static final int[] T = { + /* T8_0 */ + 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, + 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3, + 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, + 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91, + 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, + 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, + 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, + 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5, + 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172, + 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, + 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, + 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, + 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, + 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F, + 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, + 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, + 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, + 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433, + 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, + 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01, + 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E, + 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, + 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C, + 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, + 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, + 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, + 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0, + 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9, + 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, + 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F, + 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, + 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD, + 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A, + 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683, + 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, + 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, + 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, + 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7, + 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC, + 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, + 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, + 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B, + 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, + 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79, + 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, + 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, + 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, + 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D, + 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, + 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713, + 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, + 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, + 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E, + 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777, + 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, + 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45, + 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, + 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB, + 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, + 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9, + 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, + 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF, + 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94, + 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D, + /* T8_1 */ + 0x00000000, 0x191B3141, 0x32366282, 0x2B2D53C3, + 0x646CC504, 0x7D77F445, 0x565AA786, 0x4F4196C7, + 0xC8D98A08, 0xD1C2BB49, 0xFAEFE88A, 0xE3F4D9CB, + 0xACB54F0C, 0xB5AE7E4D, 0x9E832D8E, 0x87981CCF, + 0x4AC21251, 0x53D92310, 0x78F470D3, 0x61EF4192, + 0x2EAED755, 0x37B5E614, 0x1C98B5D7, 
0x05838496, + 0x821B9859, 0x9B00A918, 0xB02DFADB, 0xA936CB9A, + 0xE6775D5D, 0xFF6C6C1C, 0xD4413FDF, 0xCD5A0E9E, + 0x958424A2, 0x8C9F15E3, 0xA7B24620, 0xBEA97761, + 0xF1E8E1A6, 0xE8F3D0E7, 0xC3DE8324, 0xDAC5B265, + 0x5D5DAEAA, 0x44469FEB, 0x6F6BCC28, 0x7670FD69, + 0x39316BAE, 0x202A5AEF, 0x0B07092C, 0x121C386D, + 0xDF4636F3, 0xC65D07B2, 0xED705471, 0xF46B6530, + 0xBB2AF3F7, 0xA231C2B6, 0x891C9175, 0x9007A034, + 0x179FBCFB, 0x0E848DBA, 0x25A9DE79, 0x3CB2EF38, + 0x73F379FF, 0x6AE848BE, 0x41C51B7D, 0x58DE2A3C, + 0xF0794F05, 0xE9627E44, 0xC24F2D87, 0xDB541CC6, + 0x94158A01, 0x8D0EBB40, 0xA623E883, 0xBF38D9C2, + 0x38A0C50D, 0x21BBF44C, 0x0A96A78F, 0x138D96CE, + 0x5CCC0009, 0x45D73148, 0x6EFA628B, 0x77E153CA, + 0xBABB5D54, 0xA3A06C15, 0x888D3FD6, 0x91960E97, + 0xDED79850, 0xC7CCA911, 0xECE1FAD2, 0xF5FACB93, + 0x7262D75C, 0x6B79E61D, 0x4054B5DE, 0x594F849F, + 0x160E1258, 0x0F152319, 0x243870DA, 0x3D23419B, + 0x65FD6BA7, 0x7CE65AE6, 0x57CB0925, 0x4ED03864, + 0x0191AEA3, 0x188A9FE2, 0x33A7CC21, 0x2ABCFD60, + 0xAD24E1AF, 0xB43FD0EE, 0x9F12832D, 0x8609B26C, + 0xC94824AB, 0xD05315EA, 0xFB7E4629, 0xE2657768, + 0x2F3F79F6, 0x362448B7, 0x1D091B74, 0x04122A35, + 0x4B53BCF2, 0x52488DB3, 0x7965DE70, 0x607EEF31, + 0xE7E6F3FE, 0xFEFDC2BF, 0xD5D0917C, 0xCCCBA03D, + 0x838A36FA, 0x9A9107BB, 0xB1BC5478, 0xA8A76539, + 0x3B83984B, 0x2298A90A, 0x09B5FAC9, 0x10AECB88, + 0x5FEF5D4F, 0x46F46C0E, 0x6DD93FCD, 0x74C20E8C, + 0xF35A1243, 0xEA412302, 0xC16C70C1, 0xD8774180, + 0x9736D747, 0x8E2DE606, 0xA500B5C5, 0xBC1B8484, + 0x71418A1A, 0x685ABB5B, 0x4377E898, 0x5A6CD9D9, + 0x152D4F1E, 0x0C367E5F, 0x271B2D9C, 0x3E001CDD, + 0xB9980012, 0xA0833153, 0x8BAE6290, 0x92B553D1, + 0xDDF4C516, 0xC4EFF457, 0xEFC2A794, 0xF6D996D5, + 0xAE07BCE9, 0xB71C8DA8, 0x9C31DE6B, 0x852AEF2A, + 0xCA6B79ED, 0xD37048AC, 0xF85D1B6F, 0xE1462A2E, + 0x66DE36E1, 0x7FC507A0, 0x54E85463, 0x4DF36522, + 0x02B2F3E5, 0x1BA9C2A4, 0x30849167, 0x299FA026, + 0xE4C5AEB8, 0xFDDE9FF9, 0xD6F3CC3A, 0xCFE8FD7B, + 0x80A96BBC, 0x99B25AFD, 0xB29F093E, 0xAB84387F, + 0x2C1C24B0, 0x350715F1, 0x1E2A4632, 0x07317773, + 0x4870E1B4, 0x516BD0F5, 0x7A468336, 0x635DB277, + 0xCBFAD74E, 0xD2E1E60F, 0xF9CCB5CC, 0xE0D7848D, + 0xAF96124A, 0xB68D230B, 0x9DA070C8, 0x84BB4189, + 0x03235D46, 0x1A386C07, 0x31153FC4, 0x280E0E85, + 0x674F9842, 0x7E54A903, 0x5579FAC0, 0x4C62CB81, + 0x8138C51F, 0x9823F45E, 0xB30EA79D, 0xAA1596DC, + 0xE554001B, 0xFC4F315A, 0xD7626299, 0xCE7953D8, + 0x49E14F17, 0x50FA7E56, 0x7BD72D95, 0x62CC1CD4, + 0x2D8D8A13, 0x3496BB52, 0x1FBBE891, 0x06A0D9D0, + 0x5E7EF3EC, 0x4765C2AD, 0x6C48916E, 0x7553A02F, + 0x3A1236E8, 0x230907A9, 0x0824546A, 0x113F652B, + 0x96A779E4, 0x8FBC48A5, 0xA4911B66, 0xBD8A2A27, + 0xF2CBBCE0, 0xEBD08DA1, 0xC0FDDE62, 0xD9E6EF23, + 0x14BCE1BD, 0x0DA7D0FC, 0x268A833F, 0x3F91B27E, + 0x70D024B9, 0x69CB15F8, 0x42E6463B, 0x5BFD777A, + 0xDC656BB5, 0xC57E5AF4, 0xEE530937, 0xF7483876, + 0xB809AEB1, 0xA1129FF0, 0x8A3FCC33, 0x9324FD72, + /* T8_2 */ + 0x00000000, 0x01C26A37, 0x0384D46E, 0x0246BE59, + 0x0709A8DC, 0x06CBC2EB, 0x048D7CB2, 0x054F1685, + 0x0E1351B8, 0x0FD13B8F, 0x0D9785D6, 0x0C55EFE1, + 0x091AF964, 0x08D89353, 0x0A9E2D0A, 0x0B5C473D, + 0x1C26A370, 0x1DE4C947, 0x1FA2771E, 0x1E601D29, + 0x1B2F0BAC, 0x1AED619B, 0x18ABDFC2, 0x1969B5F5, + 0x1235F2C8, 0x13F798FF, 0x11B126A6, 0x10734C91, + 0x153C5A14, 0x14FE3023, 0x16B88E7A, 0x177AE44D, + 0x384D46E0, 0x398F2CD7, 0x3BC9928E, 0x3A0BF8B9, + 0x3F44EE3C, 0x3E86840B, 0x3CC03A52, 0x3D025065, + 0x365E1758, 0x379C7D6F, 0x35DAC336, 0x3418A901, + 0x3157BF84, 0x3095D5B3, 0x32D36BEA, 0x331101DD, + 0x246BE590, 0x25A98FA7, 
0x27EF31FE, 0x262D5BC9, + 0x23624D4C, 0x22A0277B, 0x20E69922, 0x2124F315, + 0x2A78B428, 0x2BBADE1F, 0x29FC6046, 0x283E0A71, + 0x2D711CF4, 0x2CB376C3, 0x2EF5C89A, 0x2F37A2AD, + 0x709A8DC0, 0x7158E7F7, 0x731E59AE, 0x72DC3399, + 0x7793251C, 0x76514F2B, 0x7417F172, 0x75D59B45, + 0x7E89DC78, 0x7F4BB64F, 0x7D0D0816, 0x7CCF6221, + 0x798074A4, 0x78421E93, 0x7A04A0CA, 0x7BC6CAFD, + 0x6CBC2EB0, 0x6D7E4487, 0x6F38FADE, 0x6EFA90E9, + 0x6BB5866C, 0x6A77EC5B, 0x68315202, 0x69F33835, + 0x62AF7F08, 0x636D153F, 0x612BAB66, 0x60E9C151, + 0x65A6D7D4, 0x6464BDE3, 0x662203BA, 0x67E0698D, + 0x48D7CB20, 0x4915A117, 0x4B531F4E, 0x4A917579, + 0x4FDE63FC, 0x4E1C09CB, 0x4C5AB792, 0x4D98DDA5, + 0x46C49A98, 0x4706F0AF, 0x45404EF6, 0x448224C1, + 0x41CD3244, 0x400F5873, 0x4249E62A, 0x438B8C1D, + 0x54F16850, 0x55330267, 0x5775BC3E, 0x56B7D609, + 0x53F8C08C, 0x523AAABB, 0x507C14E2, 0x51BE7ED5, + 0x5AE239E8, 0x5B2053DF, 0x5966ED86, 0x58A487B1, + 0x5DEB9134, 0x5C29FB03, 0x5E6F455A, 0x5FAD2F6D, + 0xE1351B80, 0xE0F771B7, 0xE2B1CFEE, 0xE373A5D9, + 0xE63CB35C, 0xE7FED96B, 0xE5B86732, 0xE47A0D05, + 0xEF264A38, 0xEEE4200F, 0xECA29E56, 0xED60F461, + 0xE82FE2E4, 0xE9ED88D3, 0xEBAB368A, 0xEA695CBD, + 0xFD13B8F0, 0xFCD1D2C7, 0xFE976C9E, 0xFF5506A9, + 0xFA1A102C, 0xFBD87A1B, 0xF99EC442, 0xF85CAE75, + 0xF300E948, 0xF2C2837F, 0xF0843D26, 0xF1465711, + 0xF4094194, 0xF5CB2BA3, 0xF78D95FA, 0xF64FFFCD, + 0xD9785D60, 0xD8BA3757, 0xDAFC890E, 0xDB3EE339, + 0xDE71F5BC, 0xDFB39F8B, 0xDDF521D2, 0xDC374BE5, + 0xD76B0CD8, 0xD6A966EF, 0xD4EFD8B6, 0xD52DB281, + 0xD062A404, 0xD1A0CE33, 0xD3E6706A, 0xD2241A5D, + 0xC55EFE10, 0xC49C9427, 0xC6DA2A7E, 0xC7184049, + 0xC25756CC, 0xC3953CFB, 0xC1D382A2, 0xC011E895, + 0xCB4DAFA8, 0xCA8FC59F, 0xC8C97BC6, 0xC90B11F1, + 0xCC440774, 0xCD866D43, 0xCFC0D31A, 0xCE02B92D, + 0x91AF9640, 0x906DFC77, 0x922B422E, 0x93E92819, + 0x96A63E9C, 0x976454AB, 0x9522EAF2, 0x94E080C5, + 0x9FBCC7F8, 0x9E7EADCF, 0x9C381396, 0x9DFA79A1, + 0x98B56F24, 0x99770513, 0x9B31BB4A, 0x9AF3D17D, + 0x8D893530, 0x8C4B5F07, 0x8E0DE15E, 0x8FCF8B69, + 0x8A809DEC, 0x8B42F7DB, 0x89044982, 0x88C623B5, + 0x839A6488, 0x82580EBF, 0x801EB0E6, 0x81DCDAD1, + 0x8493CC54, 0x8551A663, 0x8717183A, 0x86D5720D, + 0xA9E2D0A0, 0xA820BA97, 0xAA6604CE, 0xABA46EF9, + 0xAEEB787C, 0xAF29124B, 0xAD6FAC12, 0xACADC625, + 0xA7F18118, 0xA633EB2F, 0xA4755576, 0xA5B73F41, + 0xA0F829C4, 0xA13A43F3, 0xA37CFDAA, 0xA2BE979D, + 0xB5C473D0, 0xB40619E7, 0xB640A7BE, 0xB782CD89, + 0xB2CDDB0C, 0xB30FB13B, 0xB1490F62, 0xB08B6555, + 0xBBD72268, 0xBA15485F, 0xB853F606, 0xB9919C31, + 0xBCDE8AB4, 0xBD1CE083, 0xBF5A5EDA, 0xBE9834ED, + /* T8_3 */ + 0x00000000, 0xB8BC6765, 0xAA09C88B, 0x12B5AFEE, + 0x8F629757, 0x37DEF032, 0x256B5FDC, 0x9DD738B9, + 0xC5B428EF, 0x7D084F8A, 0x6FBDE064, 0xD7018701, + 0x4AD6BFB8, 0xF26AD8DD, 0xE0DF7733, 0x58631056, + 0x5019579F, 0xE8A530FA, 0xFA109F14, 0x42ACF871, + 0xDF7BC0C8, 0x67C7A7AD, 0x75720843, 0xCDCE6F26, + 0x95AD7F70, 0x2D111815, 0x3FA4B7FB, 0x8718D09E, + 0x1ACFE827, 0xA2738F42, 0xB0C620AC, 0x087A47C9, + 0xA032AF3E, 0x188EC85B, 0x0A3B67B5, 0xB28700D0, + 0x2F503869, 0x97EC5F0C, 0x8559F0E2, 0x3DE59787, + 0x658687D1, 0xDD3AE0B4, 0xCF8F4F5A, 0x7733283F, + 0xEAE41086, 0x525877E3, 0x40EDD80D, 0xF851BF68, + 0xF02BF8A1, 0x48979FC4, 0x5A22302A, 0xE29E574F, + 0x7F496FF6, 0xC7F50893, 0xD540A77D, 0x6DFCC018, + 0x359FD04E, 0x8D23B72B, 0x9F9618C5, 0x272A7FA0, + 0xBAFD4719, 0x0241207C, 0x10F48F92, 0xA848E8F7, + 0x9B14583D, 0x23A83F58, 0x311D90B6, 0x89A1F7D3, + 0x1476CF6A, 0xACCAA80F, 0xBE7F07E1, 0x06C36084, + 0x5EA070D2, 0xE61C17B7, 0xF4A9B859, 0x4C15DF3C, + 0xD1C2E785, 
0x697E80E0, 0x7BCB2F0E, 0xC377486B, + 0xCB0D0FA2, 0x73B168C7, 0x6104C729, 0xD9B8A04C, + 0x446F98F5, 0xFCD3FF90, 0xEE66507E, 0x56DA371B, + 0x0EB9274D, 0xB6054028, 0xA4B0EFC6, 0x1C0C88A3, + 0x81DBB01A, 0x3967D77F, 0x2BD27891, 0x936E1FF4, + 0x3B26F703, 0x839A9066, 0x912F3F88, 0x299358ED, + 0xB4446054, 0x0CF80731, 0x1E4DA8DF, 0xA6F1CFBA, + 0xFE92DFEC, 0x462EB889, 0x549B1767, 0xEC277002, + 0x71F048BB, 0xC94C2FDE, 0xDBF98030, 0x6345E755, + 0x6B3FA09C, 0xD383C7F9, 0xC1366817, 0x798A0F72, + 0xE45D37CB, 0x5CE150AE, 0x4E54FF40, 0xF6E89825, + 0xAE8B8873, 0x1637EF16, 0x048240F8, 0xBC3E279D, + 0x21E91F24, 0x99557841, 0x8BE0D7AF, 0x335CB0CA, + 0xED59B63B, 0x55E5D15E, 0x47507EB0, 0xFFEC19D5, + 0x623B216C, 0xDA874609, 0xC832E9E7, 0x708E8E82, + 0x28ED9ED4, 0x9051F9B1, 0x82E4565F, 0x3A58313A, + 0xA78F0983, 0x1F336EE6, 0x0D86C108, 0xB53AA66D, + 0xBD40E1A4, 0x05FC86C1, 0x1749292F, 0xAFF54E4A, + 0x322276F3, 0x8A9E1196, 0x982BBE78, 0x2097D91D, + 0x78F4C94B, 0xC048AE2E, 0xD2FD01C0, 0x6A4166A5, + 0xF7965E1C, 0x4F2A3979, 0x5D9F9697, 0xE523F1F2, + 0x4D6B1905, 0xF5D77E60, 0xE762D18E, 0x5FDEB6EB, + 0xC2098E52, 0x7AB5E937, 0x680046D9, 0xD0BC21BC, + 0x88DF31EA, 0x3063568F, 0x22D6F961, 0x9A6A9E04, + 0x07BDA6BD, 0xBF01C1D8, 0xADB46E36, 0x15080953, + 0x1D724E9A, 0xA5CE29FF, 0xB77B8611, 0x0FC7E174, + 0x9210D9CD, 0x2AACBEA8, 0x38191146, 0x80A57623, + 0xD8C66675, 0x607A0110, 0x72CFAEFE, 0xCA73C99B, + 0x57A4F122, 0xEF189647, 0xFDAD39A9, 0x45115ECC, + 0x764DEE06, 0xCEF18963, 0xDC44268D, 0x64F841E8, + 0xF92F7951, 0x41931E34, 0x5326B1DA, 0xEB9AD6BF, + 0xB3F9C6E9, 0x0B45A18C, 0x19F00E62, 0xA14C6907, + 0x3C9B51BE, 0x842736DB, 0x96929935, 0x2E2EFE50, + 0x2654B999, 0x9EE8DEFC, 0x8C5D7112, 0x34E11677, + 0xA9362ECE, 0x118A49AB, 0x033FE645, 0xBB838120, + 0xE3E09176, 0x5B5CF613, 0x49E959FD, 0xF1553E98, + 0x6C820621, 0xD43E6144, 0xC68BCEAA, 0x7E37A9CF, + 0xD67F4138, 0x6EC3265D, 0x7C7689B3, 0xC4CAEED6, + 0x591DD66F, 0xE1A1B10A, 0xF3141EE4, 0x4BA87981, + 0x13CB69D7, 0xAB770EB2, 0xB9C2A15C, 0x017EC639, + 0x9CA9FE80, 0x241599E5, 0x36A0360B, 0x8E1C516E, + 0x866616A7, 0x3EDA71C2, 0x2C6FDE2C, 0x94D3B949, + 0x090481F0, 0xB1B8E695, 0xA30D497B, 0x1BB12E1E, + 0x43D23E48, 0xFB6E592D, 0xE9DBF6C3, 0x516791A6, + 0xCCB0A91F, 0x740CCE7A, 0x66B96194, 0xDE0506F1, + /* T8_4 */ + 0x00000000, 0x3D6029B0, 0x7AC05360, 0x47A07AD0, + 0xF580A6C0, 0xC8E08F70, 0x8F40F5A0, 0xB220DC10, + 0x30704BC1, 0x0D106271, 0x4AB018A1, 0x77D03111, + 0xC5F0ED01, 0xF890C4B1, 0xBF30BE61, 0x825097D1, + 0x60E09782, 0x5D80BE32, 0x1A20C4E2, 0x2740ED52, + 0x95603142, 0xA80018F2, 0xEFA06222, 0xD2C04B92, + 0x5090DC43, 0x6DF0F5F3, 0x2A508F23, 0x1730A693, + 0xA5107A83, 0x98705333, 0xDFD029E3, 0xE2B00053, + 0xC1C12F04, 0xFCA106B4, 0xBB017C64, 0x866155D4, + 0x344189C4, 0x0921A074, 0x4E81DAA4, 0x73E1F314, + 0xF1B164C5, 0xCCD14D75, 0x8B7137A5, 0xB6111E15, + 0x0431C205, 0x3951EBB5, 0x7EF19165, 0x4391B8D5, + 0xA121B886, 0x9C419136, 0xDBE1EBE6, 0xE681C256, + 0x54A11E46, 0x69C137F6, 0x2E614D26, 0x13016496, + 0x9151F347, 0xAC31DAF7, 0xEB91A027, 0xD6F18997, + 0x64D15587, 0x59B17C37, 0x1E1106E7, 0x23712F57, + 0x58F35849, 0x659371F9, 0x22330B29, 0x1F532299, + 0xAD73FE89, 0x9013D739, 0xD7B3ADE9, 0xEAD38459, + 0x68831388, 0x55E33A38, 0x124340E8, 0x2F236958, + 0x9D03B548, 0xA0639CF8, 0xE7C3E628, 0xDAA3CF98, + 0x3813CFCB, 0x0573E67B, 0x42D39CAB, 0x7FB3B51B, + 0xCD93690B, 0xF0F340BB, 0xB7533A6B, 0x8A3313DB, + 0x0863840A, 0x3503ADBA, 0x72A3D76A, 0x4FC3FEDA, + 0xFDE322CA, 0xC0830B7A, 0x872371AA, 0xBA43581A, + 0x9932774D, 0xA4525EFD, 0xE3F2242D, 0xDE920D9D, + 0x6CB2D18D, 0x51D2F83D, 0x167282ED, 0x2B12AB5D, + 
0xA9423C8C, 0x9422153C, 0xD3826FEC, 0xEEE2465C, + 0x5CC29A4C, 0x61A2B3FC, 0x2602C92C, 0x1B62E09C, + 0xF9D2E0CF, 0xC4B2C97F, 0x8312B3AF, 0xBE729A1F, + 0x0C52460F, 0x31326FBF, 0x7692156F, 0x4BF23CDF, + 0xC9A2AB0E, 0xF4C282BE, 0xB362F86E, 0x8E02D1DE, + 0x3C220DCE, 0x0142247E, 0x46E25EAE, 0x7B82771E, + 0xB1E6B092, 0x8C869922, 0xCB26E3F2, 0xF646CA42, + 0x44661652, 0x79063FE2, 0x3EA64532, 0x03C66C82, + 0x8196FB53, 0xBCF6D2E3, 0xFB56A833, 0xC6368183, + 0x74165D93, 0x49767423, 0x0ED60EF3, 0x33B62743, + 0xD1062710, 0xEC660EA0, 0xABC67470, 0x96A65DC0, + 0x248681D0, 0x19E6A860, 0x5E46D2B0, 0x6326FB00, + 0xE1766CD1, 0xDC164561, 0x9BB63FB1, 0xA6D61601, + 0x14F6CA11, 0x2996E3A1, 0x6E369971, 0x5356B0C1, + 0x70279F96, 0x4D47B626, 0x0AE7CCF6, 0x3787E546, + 0x85A73956, 0xB8C710E6, 0xFF676A36, 0xC2074386, + 0x4057D457, 0x7D37FDE7, 0x3A978737, 0x07F7AE87, + 0xB5D77297, 0x88B75B27, 0xCF1721F7, 0xF2770847, + 0x10C70814, 0x2DA721A4, 0x6A075B74, 0x576772C4, + 0xE547AED4, 0xD8278764, 0x9F87FDB4, 0xA2E7D404, + 0x20B743D5, 0x1DD76A65, 0x5A7710B5, 0x67173905, + 0xD537E515, 0xE857CCA5, 0xAFF7B675, 0x92979FC5, + 0xE915E8DB, 0xD475C16B, 0x93D5BBBB, 0xAEB5920B, + 0x1C954E1B, 0x21F567AB, 0x66551D7B, 0x5B3534CB, + 0xD965A31A, 0xE4058AAA, 0xA3A5F07A, 0x9EC5D9CA, + 0x2CE505DA, 0x11852C6A, 0x562556BA, 0x6B457F0A, + 0x89F57F59, 0xB49556E9, 0xF3352C39, 0xCE550589, + 0x7C75D999, 0x4115F029, 0x06B58AF9, 0x3BD5A349, + 0xB9853498, 0x84E51D28, 0xC34567F8, 0xFE254E48, + 0x4C059258, 0x7165BBE8, 0x36C5C138, 0x0BA5E888, + 0x28D4C7DF, 0x15B4EE6F, 0x521494BF, 0x6F74BD0F, + 0xDD54611F, 0xE03448AF, 0xA794327F, 0x9AF41BCF, + 0x18A48C1E, 0x25C4A5AE, 0x6264DF7E, 0x5F04F6CE, + 0xED242ADE, 0xD044036E, 0x97E479BE, 0xAA84500E, + 0x4834505D, 0x755479ED, 0x32F4033D, 0x0F942A8D, + 0xBDB4F69D, 0x80D4DF2D, 0xC774A5FD, 0xFA148C4D, + 0x78441B9C, 0x4524322C, 0x028448FC, 0x3FE4614C, + 0x8DC4BD5C, 0xB0A494EC, 0xF704EE3C, 0xCA64C78C, + /* T8_5 */ + 0x00000000, 0xCB5CD3A5, 0x4DC8A10B, 0x869472AE, + 0x9B914216, 0x50CD91B3, 0xD659E31D, 0x1D0530B8, + 0xEC53826D, 0x270F51C8, 0xA19B2366, 0x6AC7F0C3, + 0x77C2C07B, 0xBC9E13DE, 0x3A0A6170, 0xF156B2D5, + 0x03D6029B, 0xC88AD13E, 0x4E1EA390, 0x85427035, + 0x9847408D, 0x531B9328, 0xD58FE186, 0x1ED33223, + 0xEF8580F6, 0x24D95353, 0xA24D21FD, 0x6911F258, + 0x7414C2E0, 0xBF481145, 0x39DC63EB, 0xF280B04E, + 0x07AC0536, 0xCCF0D693, 0x4A64A43D, 0x81387798, + 0x9C3D4720, 0x57619485, 0xD1F5E62B, 0x1AA9358E, + 0xEBFF875B, 0x20A354FE, 0xA6372650, 0x6D6BF5F5, + 0x706EC54D, 0xBB3216E8, 0x3DA66446, 0xF6FAB7E3, + 0x047A07AD, 0xCF26D408, 0x49B2A6A6, 0x82EE7503, + 0x9FEB45BB, 0x54B7961E, 0xD223E4B0, 0x197F3715, + 0xE82985C0, 0x23755665, 0xA5E124CB, 0x6EBDF76E, + 0x73B8C7D6, 0xB8E41473, 0x3E7066DD, 0xF52CB578, + 0x0F580A6C, 0xC404D9C9, 0x4290AB67, 0x89CC78C2, + 0x94C9487A, 0x5F959BDF, 0xD901E971, 0x125D3AD4, + 0xE30B8801, 0x28575BA4, 0xAEC3290A, 0x659FFAAF, + 0x789ACA17, 0xB3C619B2, 0x35526B1C, 0xFE0EB8B9, + 0x0C8E08F7, 0xC7D2DB52, 0x4146A9FC, 0x8A1A7A59, + 0x971F4AE1, 0x5C439944, 0xDAD7EBEA, 0x118B384F, + 0xE0DD8A9A, 0x2B81593F, 0xAD152B91, 0x6649F834, + 0x7B4CC88C, 0xB0101B29, 0x36846987, 0xFDD8BA22, + 0x08F40F5A, 0xC3A8DCFF, 0x453CAE51, 0x8E607DF4, + 0x93654D4C, 0x58399EE9, 0xDEADEC47, 0x15F13FE2, + 0xE4A78D37, 0x2FFB5E92, 0xA96F2C3C, 0x6233FF99, + 0x7F36CF21, 0xB46A1C84, 0x32FE6E2A, 0xF9A2BD8F, + 0x0B220DC1, 0xC07EDE64, 0x46EAACCA, 0x8DB67F6F, + 0x90B34FD7, 0x5BEF9C72, 0xDD7BEEDC, 0x16273D79, + 0xE7718FAC, 0x2C2D5C09, 0xAAB92EA7, 0x61E5FD02, + 0x7CE0CDBA, 0xB7BC1E1F, 0x31286CB1, 0xFA74BF14, + 0x1EB014D8, 0xD5ECC77D, 0x5378B5D3, 
0x98246676, + 0x852156CE, 0x4E7D856B, 0xC8E9F7C5, 0x03B52460, + 0xF2E396B5, 0x39BF4510, 0xBF2B37BE, 0x7477E41B, + 0x6972D4A3, 0xA22E0706, 0x24BA75A8, 0xEFE6A60D, + 0x1D661643, 0xD63AC5E6, 0x50AEB748, 0x9BF264ED, + 0x86F75455, 0x4DAB87F0, 0xCB3FF55E, 0x006326FB, + 0xF135942E, 0x3A69478B, 0xBCFD3525, 0x77A1E680, + 0x6AA4D638, 0xA1F8059D, 0x276C7733, 0xEC30A496, + 0x191C11EE, 0xD240C24B, 0x54D4B0E5, 0x9F886340, + 0x828D53F8, 0x49D1805D, 0xCF45F2F3, 0x04192156, + 0xF54F9383, 0x3E134026, 0xB8873288, 0x73DBE12D, + 0x6EDED195, 0xA5820230, 0x2316709E, 0xE84AA33B, + 0x1ACA1375, 0xD196C0D0, 0x5702B27E, 0x9C5E61DB, + 0x815B5163, 0x4A0782C6, 0xCC93F068, 0x07CF23CD, + 0xF6999118, 0x3DC542BD, 0xBB513013, 0x700DE3B6, + 0x6D08D30E, 0xA65400AB, 0x20C07205, 0xEB9CA1A0, + 0x11E81EB4, 0xDAB4CD11, 0x5C20BFBF, 0x977C6C1A, + 0x8A795CA2, 0x41258F07, 0xC7B1FDA9, 0x0CED2E0C, + 0xFDBB9CD9, 0x36E74F7C, 0xB0733DD2, 0x7B2FEE77, + 0x662ADECF, 0xAD760D6A, 0x2BE27FC4, 0xE0BEAC61, + 0x123E1C2F, 0xD962CF8A, 0x5FF6BD24, 0x94AA6E81, + 0x89AF5E39, 0x42F38D9C, 0xC467FF32, 0x0F3B2C97, + 0xFE6D9E42, 0x35314DE7, 0xB3A53F49, 0x78F9ECEC, + 0x65FCDC54, 0xAEA00FF1, 0x28347D5F, 0xE368AEFA, + 0x16441B82, 0xDD18C827, 0x5B8CBA89, 0x90D0692C, + 0x8DD55994, 0x46898A31, 0xC01DF89F, 0x0B412B3A, + 0xFA1799EF, 0x314B4A4A, 0xB7DF38E4, 0x7C83EB41, + 0x6186DBF9, 0xAADA085C, 0x2C4E7AF2, 0xE712A957, + 0x15921919, 0xDECECABC, 0x585AB812, 0x93066BB7, + 0x8E035B0F, 0x455F88AA, 0xC3CBFA04, 0x089729A1, + 0xF9C19B74, 0x329D48D1, 0xB4093A7F, 0x7F55E9DA, + 0x6250D962, 0xA90C0AC7, 0x2F987869, 0xE4C4ABCC, + /* T8_6 */ + 0x00000000, 0xA6770BB4, 0x979F1129, 0x31E81A9D, + 0xF44F2413, 0x52382FA7, 0x63D0353A, 0xC5A73E8E, + 0x33EF4E67, 0x959845D3, 0xA4705F4E, 0x020754FA, + 0xC7A06A74, 0x61D761C0, 0x503F7B5D, 0xF64870E9, + 0x67DE9CCE, 0xC1A9977A, 0xF0418DE7, 0x56368653, + 0x9391B8DD, 0x35E6B369, 0x040EA9F4, 0xA279A240, + 0x5431D2A9, 0xF246D91D, 0xC3AEC380, 0x65D9C834, + 0xA07EF6BA, 0x0609FD0E, 0x37E1E793, 0x9196EC27, + 0xCFBD399C, 0x69CA3228, 0x582228B5, 0xFE552301, + 0x3BF21D8F, 0x9D85163B, 0xAC6D0CA6, 0x0A1A0712, + 0xFC5277FB, 0x5A257C4F, 0x6BCD66D2, 0xCDBA6D66, + 0x081D53E8, 0xAE6A585C, 0x9F8242C1, 0x39F54975, + 0xA863A552, 0x0E14AEE6, 0x3FFCB47B, 0x998BBFCF, + 0x5C2C8141, 0xFA5B8AF5, 0xCBB39068, 0x6DC49BDC, + 0x9B8CEB35, 0x3DFBE081, 0x0C13FA1C, 0xAA64F1A8, + 0x6FC3CF26, 0xC9B4C492, 0xF85CDE0F, 0x5E2BD5BB, + 0x440B7579, 0xE27C7ECD, 0xD3946450, 0x75E36FE4, + 0xB044516A, 0x16335ADE, 0x27DB4043, 0x81AC4BF7, + 0x77E43B1E, 0xD19330AA, 0xE07B2A37, 0x460C2183, + 0x83AB1F0D, 0x25DC14B9, 0x14340E24, 0xB2430590, + 0x23D5E9B7, 0x85A2E203, 0xB44AF89E, 0x123DF32A, + 0xD79ACDA4, 0x71EDC610, 0x4005DC8D, 0xE672D739, + 0x103AA7D0, 0xB64DAC64, 0x87A5B6F9, 0x21D2BD4D, + 0xE47583C3, 0x42028877, 0x73EA92EA, 0xD59D995E, + 0x8BB64CE5, 0x2DC14751, 0x1C295DCC, 0xBA5E5678, + 0x7FF968F6, 0xD98E6342, 0xE86679DF, 0x4E11726B, + 0xB8590282, 0x1E2E0936, 0x2FC613AB, 0x89B1181F, + 0x4C162691, 0xEA612D25, 0xDB8937B8, 0x7DFE3C0C, + 0xEC68D02B, 0x4A1FDB9F, 0x7BF7C102, 0xDD80CAB6, + 0x1827F438, 0xBE50FF8C, 0x8FB8E511, 0x29CFEEA5, + 0xDF879E4C, 0x79F095F8, 0x48188F65, 0xEE6F84D1, + 0x2BC8BA5F, 0x8DBFB1EB, 0xBC57AB76, 0x1A20A0C2, + 0x8816EAF2, 0x2E61E146, 0x1F89FBDB, 0xB9FEF06F, + 0x7C59CEE1, 0xDA2EC555, 0xEBC6DFC8, 0x4DB1D47C, + 0xBBF9A495, 0x1D8EAF21, 0x2C66B5BC, 0x8A11BE08, + 0x4FB68086, 0xE9C18B32, 0xD82991AF, 0x7E5E9A1B, + 0xEFC8763C, 0x49BF7D88, 0x78576715, 0xDE206CA1, + 0x1B87522F, 0xBDF0599B, 0x8C184306, 0x2A6F48B2, + 0xDC27385B, 0x7A5033EF, 0x4BB82972, 0xEDCF22C6, + 0x28681C48, 0x8E1F17FC, 
0xBFF70D61, 0x198006D5, + 0x47ABD36E, 0xE1DCD8DA, 0xD034C247, 0x7643C9F3, + 0xB3E4F77D, 0x1593FCC9, 0x247BE654, 0x820CEDE0, + 0x74449D09, 0xD23396BD, 0xE3DB8C20, 0x45AC8794, + 0x800BB91A, 0x267CB2AE, 0x1794A833, 0xB1E3A387, + 0x20754FA0, 0x86024414, 0xB7EA5E89, 0x119D553D, + 0xD43A6BB3, 0x724D6007, 0x43A57A9A, 0xE5D2712E, + 0x139A01C7, 0xB5ED0A73, 0x840510EE, 0x22721B5A, + 0xE7D525D4, 0x41A22E60, 0x704A34FD, 0xD63D3F49, + 0xCC1D9F8B, 0x6A6A943F, 0x5B828EA2, 0xFDF58516, + 0x3852BB98, 0x9E25B02C, 0xAFCDAAB1, 0x09BAA105, + 0xFFF2D1EC, 0x5985DA58, 0x686DC0C5, 0xCE1ACB71, + 0x0BBDF5FF, 0xADCAFE4B, 0x9C22E4D6, 0x3A55EF62, + 0xABC30345, 0x0DB408F1, 0x3C5C126C, 0x9A2B19D8, + 0x5F8C2756, 0xF9FB2CE2, 0xC813367F, 0x6E643DCB, + 0x982C4D22, 0x3E5B4696, 0x0FB35C0B, 0xA9C457BF, + 0x6C636931, 0xCA146285, 0xFBFC7818, 0x5D8B73AC, + 0x03A0A617, 0xA5D7ADA3, 0x943FB73E, 0x3248BC8A, + 0xF7EF8204, 0x519889B0, 0x6070932D, 0xC6079899, + 0x304FE870, 0x9638E3C4, 0xA7D0F959, 0x01A7F2ED, + 0xC400CC63, 0x6277C7D7, 0x539FDD4A, 0xF5E8D6FE, + 0x647E3AD9, 0xC209316D, 0xF3E12BF0, 0x55962044, + 0x90311ECA, 0x3646157E, 0x07AE0FE3, 0xA1D90457, + 0x579174BE, 0xF1E67F0A, 0xC00E6597, 0x66796E23, + 0xA3DE50AD, 0x05A95B19, 0x34414184, 0x92364A30, + /* T8_7 */ + 0x00000000, 0xCCAA009E, 0x4225077D, 0x8E8F07E3, + 0x844A0EFA, 0x48E00E64, 0xC66F0987, 0x0AC50919, + 0xD3E51BB5, 0x1F4F1B2B, 0x91C01CC8, 0x5D6A1C56, + 0x57AF154F, 0x9B0515D1, 0x158A1232, 0xD92012AC, + 0x7CBB312B, 0xB01131B5, 0x3E9E3656, 0xF23436C8, + 0xF8F13FD1, 0x345B3F4F, 0xBAD438AC, 0x767E3832, + 0xAF5E2A9E, 0x63F42A00, 0xED7B2DE3, 0x21D12D7D, + 0x2B142464, 0xE7BE24FA, 0x69312319, 0xA59B2387, + 0xF9766256, 0x35DC62C8, 0xBB53652B, 0x77F965B5, + 0x7D3C6CAC, 0xB1966C32, 0x3F196BD1, 0xF3B36B4F, + 0x2A9379E3, 0xE639797D, 0x68B67E9E, 0xA41C7E00, + 0xAED97719, 0x62737787, 0xECFC7064, 0x205670FA, + 0x85CD537D, 0x496753E3, 0xC7E85400, 0x0B42549E, + 0x01875D87, 0xCD2D5D19, 0x43A25AFA, 0x8F085A64, + 0x562848C8, 0x9A824856, 0x140D4FB5, 0xD8A74F2B, + 0xD2624632, 0x1EC846AC, 0x9047414F, 0x5CED41D1, + 0x299DC2ED, 0xE537C273, 0x6BB8C590, 0xA712C50E, + 0xADD7CC17, 0x617DCC89, 0xEFF2CB6A, 0x2358CBF4, + 0xFA78D958, 0x36D2D9C6, 0xB85DDE25, 0x74F7DEBB, + 0x7E32D7A2, 0xB298D73C, 0x3C17D0DF, 0xF0BDD041, + 0x5526F3C6, 0x998CF358, 0x1703F4BB, 0xDBA9F425, + 0xD16CFD3C, 0x1DC6FDA2, 0x9349FA41, 0x5FE3FADF, + 0x86C3E873, 0x4A69E8ED, 0xC4E6EF0E, 0x084CEF90, + 0x0289E689, 0xCE23E617, 0x40ACE1F4, 0x8C06E16A, + 0xD0EBA0BB, 0x1C41A025, 0x92CEA7C6, 0x5E64A758, + 0x54A1AE41, 0x980BAEDF, 0x1684A93C, 0xDA2EA9A2, + 0x030EBB0E, 0xCFA4BB90, 0x412BBC73, 0x8D81BCED, + 0x8744B5F4, 0x4BEEB56A, 0xC561B289, 0x09CBB217, + 0xAC509190, 0x60FA910E, 0xEE7596ED, 0x22DF9673, + 0x281A9F6A, 0xE4B09FF4, 0x6A3F9817, 0xA6959889, + 0x7FB58A25, 0xB31F8ABB, 0x3D908D58, 0xF13A8DC6, + 0xFBFF84DF, 0x37558441, 0xB9DA83A2, 0x7570833C, + 0x533B85DA, 0x9F918544, 0x111E82A7, 0xDDB48239, + 0xD7718B20, 0x1BDB8BBE, 0x95548C5D, 0x59FE8CC3, + 0x80DE9E6F, 0x4C749EF1, 0xC2FB9912, 0x0E51998C, + 0x04949095, 0xC83E900B, 0x46B197E8, 0x8A1B9776, + 0x2F80B4F1, 0xE32AB46F, 0x6DA5B38C, 0xA10FB312, + 0xABCABA0B, 0x6760BA95, 0xE9EFBD76, 0x2545BDE8, + 0xFC65AF44, 0x30CFAFDA, 0xBE40A839, 0x72EAA8A7, + 0x782FA1BE, 0xB485A120, 0x3A0AA6C3, 0xF6A0A65D, + 0xAA4DE78C, 0x66E7E712, 0xE868E0F1, 0x24C2E06F, + 0x2E07E976, 0xE2ADE9E8, 0x6C22EE0B, 0xA088EE95, + 0x79A8FC39, 0xB502FCA7, 0x3B8DFB44, 0xF727FBDA, + 0xFDE2F2C3, 0x3148F25D, 0xBFC7F5BE, 0x736DF520, + 0xD6F6D6A7, 0x1A5CD639, 0x94D3D1DA, 0x5879D144, + 0x52BCD85D, 0x9E16D8C3, 0x1099DF20, 0xDC33DFBE, + 0x0513CD12, 
0xC9B9CD8C, 0x4736CA6F, 0x8B9CCAF1, + 0x8159C3E8, 0x4DF3C376, 0xC37CC495, 0x0FD6C40B, + 0x7AA64737, 0xB60C47A9, 0x3883404A, 0xF42940D4, + 0xFEEC49CD, 0x32464953, 0xBCC94EB0, 0x70634E2E, + 0xA9435C82, 0x65E95C1C, 0xEB665BFF, 0x27CC5B61, + 0x2D095278, 0xE1A352E6, 0x6F2C5505, 0xA386559B, + 0x061D761C, 0xCAB77682, 0x44387161, 0x889271FF, + 0x825778E6, 0x4EFD7878, 0xC0727F9B, 0x0CD87F05, + 0xD5F86DA9, 0x19526D37, 0x97DD6AD4, 0x5B776A4A, + 0x51B26353, 0x9D1863CD, 0x1397642E, 0xDF3D64B0, + 0x83D02561, 0x4F7A25FF, 0xC1F5221C, 0x0D5F2282, + 0x079A2B9B, 0xCB302B05, 0x45BF2CE6, 0x89152C78, + 0x50353ED4, 0x9C9F3E4A, 0x121039A9, 0xDEBA3937, + 0xD47F302E, 0x18D530B0, 0x965A3753, 0x5AF037CD, + 0xFF6B144A, 0x33C114D4, 0xBD4E1337, 0x71E413A9, + 0x7B211AB0, 0xB78B1A2E, 0x39041DCD, 0xF5AE1D53, + 0x2C8E0FFF, 0xE0240F61, 0x6EAB0882, 0xA201081C, + 0xA8C40105, 0x646E019B, 0xEAE10678, 0x264B06E6 + }; +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/PureJavaCrc32CByteBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/PureJavaCrc32CByteBuffer.java new file mode 100644 index 000000000000..1c443575f817 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/PureJavaCrc32CByteBuffer.java @@ -0,0 +1,559 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Some portions of this file Copyright (c) 2004-2006 Intel Corportation + * and licensed under the BSD license. + */ +package org.apache.hadoop.ozone.common; + +/** + * Similar to {@link org.apache.hadoop.util.PureJavaCrc32C} + * except that this class implement {@link ChecksumByteBuffer}. + */ +final class PureJavaCrc32CByteBuffer extends ChecksumByteBuffer.CrcIntTable { + @Override + int[] getTable() { + return T; + } + + /** + * CRC-32C lookup table generated by the polynomial 0x82F63B78. + * See also org.apache.hadoop.util.TestPureJavaCrc32.Table. 
+ */ + private static final int[] T = { + /* T8_0 */ + 0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4, + 0xC79A971F, 0x35F1141C, 0x26A1E7E8, 0xD4CA64EB, + 0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B, + 0x4D43CFD0, 0xBF284CD3, 0xAC78BF27, 0x5E133C24, + 0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B, + 0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384, + 0x9A879FA0, 0x68EC1CA3, 0x7BBCEF57, 0x89D76C54, + 0x5D1D08BF, 0xAF768BBC, 0xBC267848, 0x4E4DFB4B, + 0x20BD8EDE, 0xD2D60DDD, 0xC186FE29, 0x33ED7D2A, + 0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35, + 0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5, + 0x6DFE410E, 0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA, + 0x30E349B1, 0xC288CAB2, 0xD1D83946, 0x23B3BA45, + 0xF779DEAE, 0x05125DAD, 0x1642AE59, 0xE4292D5A, + 0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A, + 0x7DA08661, 0x8FCB0562, 0x9C9BF696, 0x6EF07595, + 0x417B1DBC, 0xB3109EBF, 0xA0406D4B, 0x522BEE48, + 0x86E18AA3, 0x748A09A0, 0x67DAFA54, 0x95B17957, + 0xCBA24573, 0x39C9C670, 0x2A993584, 0xD8F2B687, + 0x0C38D26C, 0xFE53516F, 0xED03A29B, 0x1F682198, + 0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927, + 0x96BF4DCC, 0x64D4CECF, 0x77843D3B, 0x85EFBE38, + 0xDBFC821C, 0x2997011F, 0x3AC7F2EB, 0xC8AC71E8, + 0x1C661503, 0xEE0D9600, 0xFD5D65F4, 0x0F36E6F7, + 0x61C69362, 0x93AD1061, 0x80FDE395, 0x72966096, + 0xA65C047D, 0x5437877E, 0x4767748A, 0xB50CF789, + 0xEB1FCBAD, 0x197448AE, 0x0A24BB5A, 0xF84F3859, + 0x2C855CB2, 0xDEEEDFB1, 0xCDBE2C45, 0x3FD5AF46, + 0x7198540D, 0x83F3D70E, 0x90A324FA, 0x62C8A7F9, + 0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6, + 0xFB410CC2, 0x092A8FC1, 0x1A7A7C35, 0xE811FF36, + 0x3CDB9BDD, 0xCEB018DE, 0xDDE0EB2A, 0x2F8B6829, + 0x82F63B78, 0x709DB87B, 0x63CD4B8F, 0x91A6C88C, + 0x456CAC67, 0xB7072F64, 0xA457DC90, 0x563C5F93, + 0x082F63B7, 0xFA44E0B4, 0xE9141340, 0x1B7F9043, + 0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C, + 0x92A8FC17, 0x60C37F14, 0x73938CE0, 0x81F80FE3, + 0x55326B08, 0xA759E80B, 0xB4091BFF, 0x466298FC, + 0x1871A4D8, 0xEA1A27DB, 0xF94AD42F, 0x0B21572C, + 0xDFEB33C7, 0x2D80B0C4, 0x3ED04330, 0xCCBBC033, + 0xA24BB5A6, 0x502036A5, 0x4370C551, 0xB11B4652, + 0x65D122B9, 0x97BAA1BA, 0x84EA524E, 0x7681D14D, + 0x2892ED69, 0xDAF96E6A, 0xC9A99D9E, 0x3BC21E9D, + 0xEF087A76, 0x1D63F975, 0x0E330A81, 0xFC588982, + 0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D, + 0x758FE5D6, 0x87E466D5, 0x94B49521, 0x66DF1622, + 0x38CC2A06, 0xCAA7A905, 0xD9F75AF1, 0x2B9CD9F2, + 0xFF56BD19, 0x0D3D3E1A, 0x1E6DCDEE, 0xEC064EED, + 0xC38D26C4, 0x31E6A5C7, 0x22B65633, 0xD0DDD530, + 0x0417B1DB, 0xF67C32D8, 0xE52CC12C, 0x1747422F, + 0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF, + 0x8ECEE914, 0x7CA56A17, 0x6FF599E3, 0x9D9E1AE0, + 0xD3D3E1AB, 0x21B862A8, 0x32E8915C, 0xC083125F, + 0x144976B4, 0xE622F5B7, 0xF5720643, 0x07198540, + 0x590AB964, 0xAB613A67, 0xB831C993, 0x4A5A4A90, + 0x9E902E7B, 0x6CFBAD78, 0x7FAB5E8C, 0x8DC0DD8F, + 0xE330A81A, 0x115B2B19, 0x020BD8ED, 0xF0605BEE, + 0x24AA3F05, 0xD6C1BC06, 0xC5914FF2, 0x37FACCF1, + 0x69E9F0D5, 0x9B8273D6, 0x88D28022, 0x7AB90321, + 0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E, + 0xF36E6F75, 0x0105EC76, 0x12551F82, 0xE03E9C81, + 0x34F4F86A, 0xC69F7B69, 0xD5CF889D, 0x27A40B9E, + 0x79B737BA, 0x8BDCB4B9, 0x988C474D, 0x6AE7C44E, + 0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351, + /* T8_1 */ + 0x00000000, 0x13A29877, 0x274530EE, 0x34E7A899, + 0x4E8A61DC, 0x5D28F9AB, 0x69CF5132, 0x7A6DC945, + 0x9D14C3B8, 0x8EB65BCF, 0xBA51F356, 0xA9F36B21, + 0xD39EA264, 0xC03C3A13, 0xF4DB928A, 0xE7790AFD, + 0x3FC5F181, 0x2C6769F6, 0x1880C16F, 0x0B225918, + 0x714F905D, 0x62ED082A, 0x560AA0B3, 
0x45A838C4, + 0xA2D13239, 0xB173AA4E, 0x859402D7, 0x96369AA0, + 0xEC5B53E5, 0xFFF9CB92, 0xCB1E630B, 0xD8BCFB7C, + 0x7F8BE302, 0x6C297B75, 0x58CED3EC, 0x4B6C4B9B, + 0x310182DE, 0x22A31AA9, 0x1644B230, 0x05E62A47, + 0xE29F20BA, 0xF13DB8CD, 0xC5DA1054, 0xD6788823, + 0xAC154166, 0xBFB7D911, 0x8B507188, 0x98F2E9FF, + 0x404E1283, 0x53EC8AF4, 0x670B226D, 0x74A9BA1A, + 0x0EC4735F, 0x1D66EB28, 0x298143B1, 0x3A23DBC6, + 0xDD5AD13B, 0xCEF8494C, 0xFA1FE1D5, 0xE9BD79A2, + 0x93D0B0E7, 0x80722890, 0xB4958009, 0xA737187E, + 0xFF17C604, 0xECB55E73, 0xD852F6EA, 0xCBF06E9D, + 0xB19DA7D8, 0xA23F3FAF, 0x96D89736, 0x857A0F41, + 0x620305BC, 0x71A19DCB, 0x45463552, 0x56E4AD25, + 0x2C896460, 0x3F2BFC17, 0x0BCC548E, 0x186ECCF9, + 0xC0D23785, 0xD370AFF2, 0xE797076B, 0xF4359F1C, + 0x8E585659, 0x9DFACE2E, 0xA91D66B7, 0xBABFFEC0, + 0x5DC6F43D, 0x4E646C4A, 0x7A83C4D3, 0x69215CA4, + 0x134C95E1, 0x00EE0D96, 0x3409A50F, 0x27AB3D78, + 0x809C2506, 0x933EBD71, 0xA7D915E8, 0xB47B8D9F, + 0xCE1644DA, 0xDDB4DCAD, 0xE9537434, 0xFAF1EC43, + 0x1D88E6BE, 0x0E2A7EC9, 0x3ACDD650, 0x296F4E27, + 0x53028762, 0x40A01F15, 0x7447B78C, 0x67E52FFB, + 0xBF59D487, 0xACFB4CF0, 0x981CE469, 0x8BBE7C1E, + 0xF1D3B55B, 0xE2712D2C, 0xD69685B5, 0xC5341DC2, + 0x224D173F, 0x31EF8F48, 0x050827D1, 0x16AABFA6, + 0x6CC776E3, 0x7F65EE94, 0x4B82460D, 0x5820DE7A, + 0xFBC3FAF9, 0xE861628E, 0xDC86CA17, 0xCF245260, + 0xB5499B25, 0xA6EB0352, 0x920CABCB, 0x81AE33BC, + 0x66D73941, 0x7575A136, 0x419209AF, 0x523091D8, + 0x285D589D, 0x3BFFC0EA, 0x0F186873, 0x1CBAF004, + 0xC4060B78, 0xD7A4930F, 0xE3433B96, 0xF0E1A3E1, + 0x8A8C6AA4, 0x992EF2D3, 0xADC95A4A, 0xBE6BC23D, + 0x5912C8C0, 0x4AB050B7, 0x7E57F82E, 0x6DF56059, + 0x1798A91C, 0x043A316B, 0x30DD99F2, 0x237F0185, + 0x844819FB, 0x97EA818C, 0xA30D2915, 0xB0AFB162, + 0xCAC27827, 0xD960E050, 0xED8748C9, 0xFE25D0BE, + 0x195CDA43, 0x0AFE4234, 0x3E19EAAD, 0x2DBB72DA, + 0x57D6BB9F, 0x447423E8, 0x70938B71, 0x63311306, + 0xBB8DE87A, 0xA82F700D, 0x9CC8D894, 0x8F6A40E3, + 0xF50789A6, 0xE6A511D1, 0xD242B948, 0xC1E0213F, + 0x26992BC2, 0x353BB3B5, 0x01DC1B2C, 0x127E835B, + 0x68134A1E, 0x7BB1D269, 0x4F567AF0, 0x5CF4E287, + 0x04D43CFD, 0x1776A48A, 0x23910C13, 0x30339464, + 0x4A5E5D21, 0x59FCC556, 0x6D1B6DCF, 0x7EB9F5B8, + 0x99C0FF45, 0x8A626732, 0xBE85CFAB, 0xAD2757DC, + 0xD74A9E99, 0xC4E806EE, 0xF00FAE77, 0xE3AD3600, + 0x3B11CD7C, 0x28B3550B, 0x1C54FD92, 0x0FF665E5, + 0x759BACA0, 0x663934D7, 0x52DE9C4E, 0x417C0439, + 0xA6050EC4, 0xB5A796B3, 0x81403E2A, 0x92E2A65D, + 0xE88F6F18, 0xFB2DF76F, 0xCFCA5FF6, 0xDC68C781, + 0x7B5FDFFF, 0x68FD4788, 0x5C1AEF11, 0x4FB87766, + 0x35D5BE23, 0x26772654, 0x12908ECD, 0x013216BA, + 0xE64B1C47, 0xF5E98430, 0xC10E2CA9, 0xD2ACB4DE, + 0xA8C17D9B, 0xBB63E5EC, 0x8F844D75, 0x9C26D502, + 0x449A2E7E, 0x5738B609, 0x63DF1E90, 0x707D86E7, + 0x0A104FA2, 0x19B2D7D5, 0x2D557F4C, 0x3EF7E73B, + 0xD98EEDC6, 0xCA2C75B1, 0xFECBDD28, 0xED69455F, + 0x97048C1A, 0x84A6146D, 0xB041BCF4, 0xA3E32483, + /* T8_2 */ + 0x00000000, 0xA541927E, 0x4F6F520D, 0xEA2EC073, + 0x9EDEA41A, 0x3B9F3664, 0xD1B1F617, 0x74F06469, + 0x38513EC5, 0x9D10ACBB, 0x773E6CC8, 0xD27FFEB6, + 0xA68F9ADF, 0x03CE08A1, 0xE9E0C8D2, 0x4CA15AAC, + 0x70A27D8A, 0xD5E3EFF4, 0x3FCD2F87, 0x9A8CBDF9, + 0xEE7CD990, 0x4B3D4BEE, 0xA1138B9D, 0x045219E3, + 0x48F3434F, 0xEDB2D131, 0x079C1142, 0xA2DD833C, + 0xD62DE755, 0x736C752B, 0x9942B558, 0x3C032726, + 0xE144FB14, 0x4405696A, 0xAE2BA919, 0x0B6A3B67, + 0x7F9A5F0E, 0xDADBCD70, 0x30F50D03, 0x95B49F7D, + 0xD915C5D1, 0x7C5457AF, 0x967A97DC, 0x333B05A2, + 0x47CB61CB, 0xE28AF3B5, 0x08A433C6, 0xADE5A1B8, + 0x91E6869E, 0x34A714E0, 
0xDE89D493, 0x7BC846ED, + 0x0F382284, 0xAA79B0FA, 0x40577089, 0xE516E2F7, + 0xA9B7B85B, 0x0CF62A25, 0xE6D8EA56, 0x43997828, + 0x37691C41, 0x92288E3F, 0x78064E4C, 0xDD47DC32, + 0xC76580D9, 0x622412A7, 0x880AD2D4, 0x2D4B40AA, + 0x59BB24C3, 0xFCFAB6BD, 0x16D476CE, 0xB395E4B0, + 0xFF34BE1C, 0x5A752C62, 0xB05BEC11, 0x151A7E6F, + 0x61EA1A06, 0xC4AB8878, 0x2E85480B, 0x8BC4DA75, + 0xB7C7FD53, 0x12866F2D, 0xF8A8AF5E, 0x5DE93D20, + 0x29195949, 0x8C58CB37, 0x66760B44, 0xC337993A, + 0x8F96C396, 0x2AD751E8, 0xC0F9919B, 0x65B803E5, + 0x1148678C, 0xB409F5F2, 0x5E273581, 0xFB66A7FF, + 0x26217BCD, 0x8360E9B3, 0x694E29C0, 0xCC0FBBBE, + 0xB8FFDFD7, 0x1DBE4DA9, 0xF7908DDA, 0x52D11FA4, + 0x1E704508, 0xBB31D776, 0x511F1705, 0xF45E857B, + 0x80AEE112, 0x25EF736C, 0xCFC1B31F, 0x6A802161, + 0x56830647, 0xF3C29439, 0x19EC544A, 0xBCADC634, + 0xC85DA25D, 0x6D1C3023, 0x8732F050, 0x2273622E, + 0x6ED23882, 0xCB93AAFC, 0x21BD6A8F, 0x84FCF8F1, + 0xF00C9C98, 0x554D0EE6, 0xBF63CE95, 0x1A225CEB, + 0x8B277743, 0x2E66E53D, 0xC448254E, 0x6109B730, + 0x15F9D359, 0xB0B84127, 0x5A968154, 0xFFD7132A, + 0xB3764986, 0x1637DBF8, 0xFC191B8B, 0x595889F5, + 0x2DA8ED9C, 0x88E97FE2, 0x62C7BF91, 0xC7862DEF, + 0xFB850AC9, 0x5EC498B7, 0xB4EA58C4, 0x11ABCABA, + 0x655BAED3, 0xC01A3CAD, 0x2A34FCDE, 0x8F756EA0, + 0xC3D4340C, 0x6695A672, 0x8CBB6601, 0x29FAF47F, + 0x5D0A9016, 0xF84B0268, 0x1265C21B, 0xB7245065, + 0x6A638C57, 0xCF221E29, 0x250CDE5A, 0x804D4C24, + 0xF4BD284D, 0x51FCBA33, 0xBBD27A40, 0x1E93E83E, + 0x5232B292, 0xF77320EC, 0x1D5DE09F, 0xB81C72E1, + 0xCCEC1688, 0x69AD84F6, 0x83834485, 0x26C2D6FB, + 0x1AC1F1DD, 0xBF8063A3, 0x55AEA3D0, 0xF0EF31AE, + 0x841F55C7, 0x215EC7B9, 0xCB7007CA, 0x6E3195B4, + 0x2290CF18, 0x87D15D66, 0x6DFF9D15, 0xC8BE0F6B, + 0xBC4E6B02, 0x190FF97C, 0xF321390F, 0x5660AB71, + 0x4C42F79A, 0xE90365E4, 0x032DA597, 0xA66C37E9, + 0xD29C5380, 0x77DDC1FE, 0x9DF3018D, 0x38B293F3, + 0x7413C95F, 0xD1525B21, 0x3B7C9B52, 0x9E3D092C, + 0xEACD6D45, 0x4F8CFF3B, 0xA5A23F48, 0x00E3AD36, + 0x3CE08A10, 0x99A1186E, 0x738FD81D, 0xD6CE4A63, + 0xA23E2E0A, 0x077FBC74, 0xED517C07, 0x4810EE79, + 0x04B1B4D5, 0xA1F026AB, 0x4BDEE6D8, 0xEE9F74A6, + 0x9A6F10CF, 0x3F2E82B1, 0xD50042C2, 0x7041D0BC, + 0xAD060C8E, 0x08479EF0, 0xE2695E83, 0x4728CCFD, + 0x33D8A894, 0x96993AEA, 0x7CB7FA99, 0xD9F668E7, + 0x9557324B, 0x3016A035, 0xDA386046, 0x7F79F238, + 0x0B899651, 0xAEC8042F, 0x44E6C45C, 0xE1A75622, + 0xDDA47104, 0x78E5E37A, 0x92CB2309, 0x378AB177, + 0x437AD51E, 0xE63B4760, 0x0C158713, 0xA954156D, + 0xE5F54FC1, 0x40B4DDBF, 0xAA9A1DCC, 0x0FDB8FB2, + 0x7B2BEBDB, 0xDE6A79A5, 0x3444B9D6, 0x91052BA8, + /* T8_3 */ + 0x00000000, 0xDD45AAB8, 0xBF672381, 0x62228939, + 0x7B2231F3, 0xA6679B4B, 0xC4451272, 0x1900B8CA, + 0xF64463E6, 0x2B01C95E, 0x49234067, 0x9466EADF, + 0x8D665215, 0x5023F8AD, 0x32017194, 0xEF44DB2C, + 0xE964B13D, 0x34211B85, 0x560392BC, 0x8B463804, + 0x924680CE, 0x4F032A76, 0x2D21A34F, 0xF06409F7, + 0x1F20D2DB, 0xC2657863, 0xA047F15A, 0x7D025BE2, + 0x6402E328, 0xB9474990, 0xDB65C0A9, 0x06206A11, + 0xD725148B, 0x0A60BE33, 0x6842370A, 0xB5079DB2, + 0xAC072578, 0x71428FC0, 0x136006F9, 0xCE25AC41, + 0x2161776D, 0xFC24DDD5, 0x9E0654EC, 0x4343FE54, + 0x5A43469E, 0x8706EC26, 0xE524651F, 0x3861CFA7, + 0x3E41A5B6, 0xE3040F0E, 0x81268637, 0x5C632C8F, + 0x45639445, 0x98263EFD, 0xFA04B7C4, 0x27411D7C, + 0xC805C650, 0x15406CE8, 0x7762E5D1, 0xAA274F69, + 0xB327F7A3, 0x6E625D1B, 0x0C40D422, 0xD1057E9A, + 0xABA65FE7, 0x76E3F55F, 0x14C17C66, 0xC984D6DE, + 0xD0846E14, 0x0DC1C4AC, 0x6FE34D95, 0xB2A6E72D, + 0x5DE23C01, 0x80A796B9, 0xE2851F80, 0x3FC0B538, + 0x26C00DF2, 
0xFB85A74A, 0x99A72E73, 0x44E284CB, + 0x42C2EEDA, 0x9F874462, 0xFDA5CD5B, 0x20E067E3, + 0x39E0DF29, 0xE4A57591, 0x8687FCA8, 0x5BC25610, + 0xB4868D3C, 0x69C32784, 0x0BE1AEBD, 0xD6A40405, + 0xCFA4BCCF, 0x12E11677, 0x70C39F4E, 0xAD8635F6, + 0x7C834B6C, 0xA1C6E1D4, 0xC3E468ED, 0x1EA1C255, + 0x07A17A9F, 0xDAE4D027, 0xB8C6591E, 0x6583F3A6, + 0x8AC7288A, 0x57828232, 0x35A00B0B, 0xE8E5A1B3, + 0xF1E51979, 0x2CA0B3C1, 0x4E823AF8, 0x93C79040, + 0x95E7FA51, 0x48A250E9, 0x2A80D9D0, 0xF7C57368, + 0xEEC5CBA2, 0x3380611A, 0x51A2E823, 0x8CE7429B, + 0x63A399B7, 0xBEE6330F, 0xDCC4BA36, 0x0181108E, + 0x1881A844, 0xC5C402FC, 0xA7E68BC5, 0x7AA3217D, + 0x52A0C93F, 0x8FE56387, 0xEDC7EABE, 0x30824006, + 0x2982F8CC, 0xF4C75274, 0x96E5DB4D, 0x4BA071F5, + 0xA4E4AAD9, 0x79A10061, 0x1B838958, 0xC6C623E0, + 0xDFC69B2A, 0x02833192, 0x60A1B8AB, 0xBDE41213, + 0xBBC47802, 0x6681D2BA, 0x04A35B83, 0xD9E6F13B, + 0xC0E649F1, 0x1DA3E349, 0x7F816A70, 0xA2C4C0C8, + 0x4D801BE4, 0x90C5B15C, 0xF2E73865, 0x2FA292DD, + 0x36A22A17, 0xEBE780AF, 0x89C50996, 0x5480A32E, + 0x8585DDB4, 0x58C0770C, 0x3AE2FE35, 0xE7A7548D, + 0xFEA7EC47, 0x23E246FF, 0x41C0CFC6, 0x9C85657E, + 0x73C1BE52, 0xAE8414EA, 0xCCA69DD3, 0x11E3376B, + 0x08E38FA1, 0xD5A62519, 0xB784AC20, 0x6AC10698, + 0x6CE16C89, 0xB1A4C631, 0xD3864F08, 0x0EC3E5B0, + 0x17C35D7A, 0xCA86F7C2, 0xA8A47EFB, 0x75E1D443, + 0x9AA50F6F, 0x47E0A5D7, 0x25C22CEE, 0xF8878656, + 0xE1873E9C, 0x3CC29424, 0x5EE01D1D, 0x83A5B7A5, + 0xF90696D8, 0x24433C60, 0x4661B559, 0x9B241FE1, + 0x8224A72B, 0x5F610D93, 0x3D4384AA, 0xE0062E12, + 0x0F42F53E, 0xD2075F86, 0xB025D6BF, 0x6D607C07, + 0x7460C4CD, 0xA9256E75, 0xCB07E74C, 0x16424DF4, + 0x106227E5, 0xCD278D5D, 0xAF050464, 0x7240AEDC, + 0x6B401616, 0xB605BCAE, 0xD4273597, 0x09629F2F, + 0xE6264403, 0x3B63EEBB, 0x59416782, 0x8404CD3A, + 0x9D0475F0, 0x4041DF48, 0x22635671, 0xFF26FCC9, + 0x2E238253, 0xF36628EB, 0x9144A1D2, 0x4C010B6A, + 0x5501B3A0, 0x88441918, 0xEA669021, 0x37233A99, + 0xD867E1B5, 0x05224B0D, 0x6700C234, 0xBA45688C, + 0xA345D046, 0x7E007AFE, 0x1C22F3C7, 0xC167597F, + 0xC747336E, 0x1A0299D6, 0x782010EF, 0xA565BA57, + 0xBC65029D, 0x6120A825, 0x0302211C, 0xDE478BA4, + 0x31035088, 0xEC46FA30, 0x8E647309, 0x5321D9B1, + 0x4A21617B, 0x9764CBC3, 0xF54642FA, 0x2803E842, + /* T8_4 */ + 0x00000000, 0x38116FAC, 0x7022DF58, 0x4833B0F4, + 0xE045BEB0, 0xD854D11C, 0x906761E8, 0xA8760E44, + 0xC5670B91, 0xFD76643D, 0xB545D4C9, 0x8D54BB65, + 0x2522B521, 0x1D33DA8D, 0x55006A79, 0x6D1105D5, + 0x8F2261D3, 0xB7330E7F, 0xFF00BE8B, 0xC711D127, + 0x6F67DF63, 0x5776B0CF, 0x1F45003B, 0x27546F97, + 0x4A456A42, 0x725405EE, 0x3A67B51A, 0x0276DAB6, + 0xAA00D4F2, 0x9211BB5E, 0xDA220BAA, 0xE2336406, + 0x1BA8B557, 0x23B9DAFB, 0x6B8A6A0F, 0x539B05A3, + 0xFBED0BE7, 0xC3FC644B, 0x8BCFD4BF, 0xB3DEBB13, + 0xDECFBEC6, 0xE6DED16A, 0xAEED619E, 0x96FC0E32, + 0x3E8A0076, 0x069B6FDA, 0x4EA8DF2E, 0x76B9B082, + 0x948AD484, 0xAC9BBB28, 0xE4A80BDC, 0xDCB96470, + 0x74CF6A34, 0x4CDE0598, 0x04EDB56C, 0x3CFCDAC0, + 0x51EDDF15, 0x69FCB0B9, 0x21CF004D, 0x19DE6FE1, + 0xB1A861A5, 0x89B90E09, 0xC18ABEFD, 0xF99BD151, + 0x37516AAE, 0x0F400502, 0x4773B5F6, 0x7F62DA5A, + 0xD714D41E, 0xEF05BBB2, 0xA7360B46, 0x9F2764EA, + 0xF236613F, 0xCA270E93, 0x8214BE67, 0xBA05D1CB, + 0x1273DF8F, 0x2A62B023, 0x625100D7, 0x5A406F7B, + 0xB8730B7D, 0x806264D1, 0xC851D425, 0xF040BB89, + 0x5836B5CD, 0x6027DA61, 0x28146A95, 0x10050539, + 0x7D1400EC, 0x45056F40, 0x0D36DFB4, 0x3527B018, + 0x9D51BE5C, 0xA540D1F0, 0xED736104, 0xD5620EA8, + 0x2CF9DFF9, 0x14E8B055, 0x5CDB00A1, 0x64CA6F0D, + 0xCCBC6149, 0xF4AD0EE5, 0xBC9EBE11, 0x848FD1BD, + 
0xE99ED468, 0xD18FBBC4, 0x99BC0B30, 0xA1AD649C, + 0x09DB6AD8, 0x31CA0574, 0x79F9B580, 0x41E8DA2C, + 0xA3DBBE2A, 0x9BCAD186, 0xD3F96172, 0xEBE80EDE, + 0x439E009A, 0x7B8F6F36, 0x33BCDFC2, 0x0BADB06E, + 0x66BCB5BB, 0x5EADDA17, 0x169E6AE3, 0x2E8F054F, + 0x86F90B0B, 0xBEE864A7, 0xF6DBD453, 0xCECABBFF, + 0x6EA2D55C, 0x56B3BAF0, 0x1E800A04, 0x269165A8, + 0x8EE76BEC, 0xB6F60440, 0xFEC5B4B4, 0xC6D4DB18, + 0xABC5DECD, 0x93D4B161, 0xDBE70195, 0xE3F66E39, + 0x4B80607D, 0x73910FD1, 0x3BA2BF25, 0x03B3D089, + 0xE180B48F, 0xD991DB23, 0x91A26BD7, 0xA9B3047B, + 0x01C50A3F, 0x39D46593, 0x71E7D567, 0x49F6BACB, + 0x24E7BF1E, 0x1CF6D0B2, 0x54C56046, 0x6CD40FEA, + 0xC4A201AE, 0xFCB36E02, 0xB480DEF6, 0x8C91B15A, + 0x750A600B, 0x4D1B0FA7, 0x0528BF53, 0x3D39D0FF, + 0x954FDEBB, 0xAD5EB117, 0xE56D01E3, 0xDD7C6E4F, + 0xB06D6B9A, 0x887C0436, 0xC04FB4C2, 0xF85EDB6E, + 0x5028D52A, 0x6839BA86, 0x200A0A72, 0x181B65DE, + 0xFA2801D8, 0xC2396E74, 0x8A0ADE80, 0xB21BB12C, + 0x1A6DBF68, 0x227CD0C4, 0x6A4F6030, 0x525E0F9C, + 0x3F4F0A49, 0x075E65E5, 0x4F6DD511, 0x777CBABD, + 0xDF0AB4F9, 0xE71BDB55, 0xAF286BA1, 0x9739040D, + 0x59F3BFF2, 0x61E2D05E, 0x29D160AA, 0x11C00F06, + 0xB9B60142, 0x81A76EEE, 0xC994DE1A, 0xF185B1B6, + 0x9C94B463, 0xA485DBCF, 0xECB66B3B, 0xD4A70497, + 0x7CD10AD3, 0x44C0657F, 0x0CF3D58B, 0x34E2BA27, + 0xD6D1DE21, 0xEEC0B18D, 0xA6F30179, 0x9EE26ED5, + 0x36946091, 0x0E850F3D, 0x46B6BFC9, 0x7EA7D065, + 0x13B6D5B0, 0x2BA7BA1C, 0x63940AE8, 0x5B856544, + 0xF3F36B00, 0xCBE204AC, 0x83D1B458, 0xBBC0DBF4, + 0x425B0AA5, 0x7A4A6509, 0x3279D5FD, 0x0A68BA51, + 0xA21EB415, 0x9A0FDBB9, 0xD23C6B4D, 0xEA2D04E1, + 0x873C0134, 0xBF2D6E98, 0xF71EDE6C, 0xCF0FB1C0, + 0x6779BF84, 0x5F68D028, 0x175B60DC, 0x2F4A0F70, + 0xCD796B76, 0xF56804DA, 0xBD5BB42E, 0x854ADB82, + 0x2D3CD5C6, 0x152DBA6A, 0x5D1E0A9E, 0x650F6532, + 0x081E60E7, 0x300F0F4B, 0x783CBFBF, 0x402DD013, + 0xE85BDE57, 0xD04AB1FB, 0x9879010F, 0xA0686EA3, + /* T8_5 */ + 0x00000000, 0xEF306B19, 0xDB8CA0C3, 0x34BCCBDA, + 0xB2F53777, 0x5DC55C6E, 0x697997B4, 0x8649FCAD, + 0x6006181F, 0x8F367306, 0xBB8AB8DC, 0x54BAD3C5, + 0xD2F32F68, 0x3DC34471, 0x097F8FAB, 0xE64FE4B2, + 0xC00C303E, 0x2F3C5B27, 0x1B8090FD, 0xF4B0FBE4, + 0x72F90749, 0x9DC96C50, 0xA975A78A, 0x4645CC93, + 0xA00A2821, 0x4F3A4338, 0x7B8688E2, 0x94B6E3FB, + 0x12FF1F56, 0xFDCF744F, 0xC973BF95, 0x2643D48C, + 0x85F4168D, 0x6AC47D94, 0x5E78B64E, 0xB148DD57, + 0x370121FA, 0xD8314AE3, 0xEC8D8139, 0x03BDEA20, + 0xE5F20E92, 0x0AC2658B, 0x3E7EAE51, 0xD14EC548, + 0x570739E5, 0xB83752FC, 0x8C8B9926, 0x63BBF23F, + 0x45F826B3, 0xAAC84DAA, 0x9E748670, 0x7144ED69, + 0xF70D11C4, 0x183D7ADD, 0x2C81B107, 0xC3B1DA1E, + 0x25FE3EAC, 0xCACE55B5, 0xFE729E6F, 0x1142F576, + 0x970B09DB, 0x783B62C2, 0x4C87A918, 0xA3B7C201, + 0x0E045BEB, 0xE13430F2, 0xD588FB28, 0x3AB89031, + 0xBCF16C9C, 0x53C10785, 0x677DCC5F, 0x884DA746, + 0x6E0243F4, 0x813228ED, 0xB58EE337, 0x5ABE882E, + 0xDCF77483, 0x33C71F9A, 0x077BD440, 0xE84BBF59, + 0xCE086BD5, 0x213800CC, 0x1584CB16, 0xFAB4A00F, + 0x7CFD5CA2, 0x93CD37BB, 0xA771FC61, 0x48419778, + 0xAE0E73CA, 0x413E18D3, 0x7582D309, 0x9AB2B810, + 0x1CFB44BD, 0xF3CB2FA4, 0xC777E47E, 0x28478F67, + 0x8BF04D66, 0x64C0267F, 0x507CEDA5, 0xBF4C86BC, + 0x39057A11, 0xD6351108, 0xE289DAD2, 0x0DB9B1CB, + 0xEBF65579, 0x04C63E60, 0x307AF5BA, 0xDF4A9EA3, + 0x5903620E, 0xB6330917, 0x828FC2CD, 0x6DBFA9D4, + 0x4BFC7D58, 0xA4CC1641, 0x9070DD9B, 0x7F40B682, + 0xF9094A2F, 0x16392136, 0x2285EAEC, 0xCDB581F5, + 0x2BFA6547, 0xC4CA0E5E, 0xF076C584, 0x1F46AE9D, + 0x990F5230, 0x763F3929, 0x4283F2F3, 0xADB399EA, + 0x1C08B7D6, 0xF338DCCF, 0xC7841715, 
0x28B47C0C, + 0xAEFD80A1, 0x41CDEBB8, 0x75712062, 0x9A414B7B, + 0x7C0EAFC9, 0x933EC4D0, 0xA7820F0A, 0x48B26413, + 0xCEFB98BE, 0x21CBF3A7, 0x1577387D, 0xFA475364, + 0xDC0487E8, 0x3334ECF1, 0x0788272B, 0xE8B84C32, + 0x6EF1B09F, 0x81C1DB86, 0xB57D105C, 0x5A4D7B45, + 0xBC029FF7, 0x5332F4EE, 0x678E3F34, 0x88BE542D, + 0x0EF7A880, 0xE1C7C399, 0xD57B0843, 0x3A4B635A, + 0x99FCA15B, 0x76CCCA42, 0x42700198, 0xAD406A81, + 0x2B09962C, 0xC439FD35, 0xF08536EF, 0x1FB55DF6, + 0xF9FAB944, 0x16CAD25D, 0x22761987, 0xCD46729E, + 0x4B0F8E33, 0xA43FE52A, 0x90832EF0, 0x7FB345E9, + 0x59F09165, 0xB6C0FA7C, 0x827C31A6, 0x6D4C5ABF, + 0xEB05A612, 0x0435CD0B, 0x308906D1, 0xDFB96DC8, + 0x39F6897A, 0xD6C6E263, 0xE27A29B9, 0x0D4A42A0, + 0x8B03BE0D, 0x6433D514, 0x508F1ECE, 0xBFBF75D7, + 0x120CEC3D, 0xFD3C8724, 0xC9804CFE, 0x26B027E7, + 0xA0F9DB4A, 0x4FC9B053, 0x7B757B89, 0x94451090, + 0x720AF422, 0x9D3A9F3B, 0xA98654E1, 0x46B63FF8, + 0xC0FFC355, 0x2FCFA84C, 0x1B736396, 0xF443088F, + 0xD200DC03, 0x3D30B71A, 0x098C7CC0, 0xE6BC17D9, + 0x60F5EB74, 0x8FC5806D, 0xBB794BB7, 0x544920AE, + 0xB206C41C, 0x5D36AF05, 0x698A64DF, 0x86BA0FC6, + 0x00F3F36B, 0xEFC39872, 0xDB7F53A8, 0x344F38B1, + 0x97F8FAB0, 0x78C891A9, 0x4C745A73, 0xA344316A, + 0x250DCDC7, 0xCA3DA6DE, 0xFE816D04, 0x11B1061D, + 0xF7FEE2AF, 0x18CE89B6, 0x2C72426C, 0xC3422975, + 0x450BD5D8, 0xAA3BBEC1, 0x9E87751B, 0x71B71E02, + 0x57F4CA8E, 0xB8C4A197, 0x8C786A4D, 0x63480154, + 0xE501FDF9, 0x0A3196E0, 0x3E8D5D3A, 0xD1BD3623, + 0x37F2D291, 0xD8C2B988, 0xEC7E7252, 0x034E194B, + 0x8507E5E6, 0x6A378EFF, 0x5E8B4525, 0xB1BB2E3C, + /* T8_6 */ + 0x00000000, 0x68032CC8, 0xD0065990, 0xB8057558, + 0xA5E0C5D1, 0xCDE3E919, 0x75E69C41, 0x1DE5B089, + 0x4E2DFD53, 0x262ED19B, 0x9E2BA4C3, 0xF628880B, + 0xEBCD3882, 0x83CE144A, 0x3BCB6112, 0x53C84DDA, + 0x9C5BFAA6, 0xF458D66E, 0x4C5DA336, 0x245E8FFE, + 0x39BB3F77, 0x51B813BF, 0xE9BD66E7, 0x81BE4A2F, + 0xD27607F5, 0xBA752B3D, 0x02705E65, 0x6A7372AD, + 0x7796C224, 0x1F95EEEC, 0xA7909BB4, 0xCF93B77C, + 0x3D5B83BD, 0x5558AF75, 0xED5DDA2D, 0x855EF6E5, + 0x98BB466C, 0xF0B86AA4, 0x48BD1FFC, 0x20BE3334, + 0x73767EEE, 0x1B755226, 0xA370277E, 0xCB730BB6, + 0xD696BB3F, 0xBE9597F7, 0x0690E2AF, 0x6E93CE67, + 0xA100791B, 0xC90355D3, 0x7106208B, 0x19050C43, + 0x04E0BCCA, 0x6CE39002, 0xD4E6E55A, 0xBCE5C992, + 0xEF2D8448, 0x872EA880, 0x3F2BDDD8, 0x5728F110, + 0x4ACD4199, 0x22CE6D51, 0x9ACB1809, 0xF2C834C1, + 0x7AB7077A, 0x12B42BB2, 0xAAB15EEA, 0xC2B27222, + 0xDF57C2AB, 0xB754EE63, 0x0F519B3B, 0x6752B7F3, + 0x349AFA29, 0x5C99D6E1, 0xE49CA3B9, 0x8C9F8F71, + 0x917A3FF8, 0xF9791330, 0x417C6668, 0x297F4AA0, + 0xE6ECFDDC, 0x8EEFD114, 0x36EAA44C, 0x5EE98884, + 0x430C380D, 0x2B0F14C5, 0x930A619D, 0xFB094D55, + 0xA8C1008F, 0xC0C22C47, 0x78C7591F, 0x10C475D7, + 0x0D21C55E, 0x6522E996, 0xDD279CCE, 0xB524B006, + 0x47EC84C7, 0x2FEFA80F, 0x97EADD57, 0xFFE9F19F, + 0xE20C4116, 0x8A0F6DDE, 0x320A1886, 0x5A09344E, + 0x09C17994, 0x61C2555C, 0xD9C72004, 0xB1C40CCC, + 0xAC21BC45, 0xC422908D, 0x7C27E5D5, 0x1424C91D, + 0xDBB77E61, 0xB3B452A9, 0x0BB127F1, 0x63B20B39, + 0x7E57BBB0, 0x16549778, 0xAE51E220, 0xC652CEE8, + 0x959A8332, 0xFD99AFFA, 0x459CDAA2, 0x2D9FF66A, + 0x307A46E3, 0x58796A2B, 0xE07C1F73, 0x887F33BB, + 0xF56E0EF4, 0x9D6D223C, 0x25685764, 0x4D6B7BAC, + 0x508ECB25, 0x388DE7ED, 0x808892B5, 0xE88BBE7D, + 0xBB43F3A7, 0xD340DF6F, 0x6B45AA37, 0x034686FF, + 0x1EA33676, 0x76A01ABE, 0xCEA56FE6, 0xA6A6432E, + 0x6935F452, 0x0136D89A, 0xB933ADC2, 0xD130810A, + 0xCCD53183, 0xA4D61D4B, 0x1CD36813, 0x74D044DB, + 0x27180901, 0x4F1B25C9, 0xF71E5091, 0x9F1D7C59, + 0x82F8CCD0, 0xEAFBE018, 
0x52FE9540, 0x3AFDB988, + 0xC8358D49, 0xA036A181, 0x1833D4D9, 0x7030F811, + 0x6DD54898, 0x05D66450, 0xBDD31108, 0xD5D03DC0, + 0x8618701A, 0xEE1B5CD2, 0x561E298A, 0x3E1D0542, + 0x23F8B5CB, 0x4BFB9903, 0xF3FEEC5B, 0x9BFDC093, + 0x546E77EF, 0x3C6D5B27, 0x84682E7F, 0xEC6B02B7, + 0xF18EB23E, 0x998D9EF6, 0x2188EBAE, 0x498BC766, + 0x1A438ABC, 0x7240A674, 0xCA45D32C, 0xA246FFE4, + 0xBFA34F6D, 0xD7A063A5, 0x6FA516FD, 0x07A63A35, + 0x8FD9098E, 0xE7DA2546, 0x5FDF501E, 0x37DC7CD6, + 0x2A39CC5F, 0x423AE097, 0xFA3F95CF, 0x923CB907, + 0xC1F4F4DD, 0xA9F7D815, 0x11F2AD4D, 0x79F18185, + 0x6414310C, 0x0C171DC4, 0xB412689C, 0xDC114454, + 0x1382F328, 0x7B81DFE0, 0xC384AAB8, 0xAB878670, + 0xB66236F9, 0xDE611A31, 0x66646F69, 0x0E6743A1, + 0x5DAF0E7B, 0x35AC22B3, 0x8DA957EB, 0xE5AA7B23, + 0xF84FCBAA, 0x904CE762, 0x2849923A, 0x404ABEF2, + 0xB2828A33, 0xDA81A6FB, 0x6284D3A3, 0x0A87FF6B, + 0x17624FE2, 0x7F61632A, 0xC7641672, 0xAF673ABA, + 0xFCAF7760, 0x94AC5BA8, 0x2CA92EF0, 0x44AA0238, + 0x594FB2B1, 0x314C9E79, 0x8949EB21, 0xE14AC7E9, + 0x2ED97095, 0x46DA5C5D, 0xFEDF2905, 0x96DC05CD, + 0x8B39B544, 0xE33A998C, 0x5B3FECD4, 0x333CC01C, + 0x60F48DC6, 0x08F7A10E, 0xB0F2D456, 0xD8F1F89E, + 0xC5144817, 0xAD1764DF, 0x15121187, 0x7D113D4F, + /* T8_7 */ + 0x00000000, 0x493C7D27, 0x9278FA4E, 0xDB448769, + 0x211D826D, 0x6821FF4A, 0xB3657823, 0xFA590504, + 0x423B04DA, 0x0B0779FD, 0xD043FE94, 0x997F83B3, + 0x632686B7, 0x2A1AFB90, 0xF15E7CF9, 0xB86201DE, + 0x847609B4, 0xCD4A7493, 0x160EF3FA, 0x5F328EDD, + 0xA56B8BD9, 0xEC57F6FE, 0x37137197, 0x7E2F0CB0, + 0xC64D0D6E, 0x8F717049, 0x5435F720, 0x1D098A07, + 0xE7508F03, 0xAE6CF224, 0x7528754D, 0x3C14086A, + 0x0D006599, 0x443C18BE, 0x9F789FD7, 0xD644E2F0, + 0x2C1DE7F4, 0x65219AD3, 0xBE651DBA, 0xF759609D, + 0x4F3B6143, 0x06071C64, 0xDD439B0D, 0x947FE62A, + 0x6E26E32E, 0x271A9E09, 0xFC5E1960, 0xB5626447, + 0x89766C2D, 0xC04A110A, 0x1B0E9663, 0x5232EB44, + 0xA86BEE40, 0xE1579367, 0x3A13140E, 0x732F6929, + 0xCB4D68F7, 0x827115D0, 0x593592B9, 0x1009EF9E, + 0xEA50EA9A, 0xA36C97BD, 0x782810D4, 0x31146DF3, + 0x1A00CB32, 0x533CB615, 0x8878317C, 0xC1444C5B, + 0x3B1D495F, 0x72213478, 0xA965B311, 0xE059CE36, + 0x583BCFE8, 0x1107B2CF, 0xCA4335A6, 0x837F4881, + 0x79264D85, 0x301A30A2, 0xEB5EB7CB, 0xA262CAEC, + 0x9E76C286, 0xD74ABFA1, 0x0C0E38C8, 0x453245EF, + 0xBF6B40EB, 0xF6573DCC, 0x2D13BAA5, 0x642FC782, + 0xDC4DC65C, 0x9571BB7B, 0x4E353C12, 0x07094135, + 0xFD504431, 0xB46C3916, 0x6F28BE7F, 0x2614C358, + 0x1700AEAB, 0x5E3CD38C, 0x857854E5, 0xCC4429C2, + 0x361D2CC6, 0x7F2151E1, 0xA465D688, 0xED59ABAF, + 0x553BAA71, 0x1C07D756, 0xC743503F, 0x8E7F2D18, + 0x7426281C, 0x3D1A553B, 0xE65ED252, 0xAF62AF75, + 0x9376A71F, 0xDA4ADA38, 0x010E5D51, 0x48322076, + 0xB26B2572, 0xFB575855, 0x2013DF3C, 0x692FA21B, + 0xD14DA3C5, 0x9871DEE2, 0x4335598B, 0x0A0924AC, + 0xF05021A8, 0xB96C5C8F, 0x6228DBE6, 0x2B14A6C1, + 0x34019664, 0x7D3DEB43, 0xA6796C2A, 0xEF45110D, + 0x151C1409, 0x5C20692E, 0x8764EE47, 0xCE589360, + 0x763A92BE, 0x3F06EF99, 0xE44268F0, 0xAD7E15D7, + 0x572710D3, 0x1E1B6DF4, 0xC55FEA9D, 0x8C6397BA, + 0xB0779FD0, 0xF94BE2F7, 0x220F659E, 0x6B3318B9, + 0x916A1DBD, 0xD856609A, 0x0312E7F3, 0x4A2E9AD4, + 0xF24C9B0A, 0xBB70E62D, 0x60346144, 0x29081C63, + 0xD3511967, 0x9A6D6440, 0x4129E329, 0x08159E0E, + 0x3901F3FD, 0x703D8EDA, 0xAB7909B3, 0xE2457494, + 0x181C7190, 0x51200CB7, 0x8A648BDE, 0xC358F6F9, + 0x7B3AF727, 0x32068A00, 0xE9420D69, 0xA07E704E, + 0x5A27754A, 0x131B086D, 0xC85F8F04, 0x8163F223, + 0xBD77FA49, 0xF44B876E, 0x2F0F0007, 0x66337D20, + 0x9C6A7824, 0xD5560503, 0x0E12826A, 0x472EFF4D, + 0xFF4CFE93, 
0xB67083B4, 0x6D3404DD, 0x240879FA, + 0xDE517CFE, 0x976D01D9, 0x4C2986B0, 0x0515FB97, + 0x2E015D56, 0x673D2071, 0xBC79A718, 0xF545DA3F, + 0x0F1CDF3B, 0x4620A21C, 0x9D642575, 0xD4585852, + 0x6C3A598C, 0x250624AB, 0xFE42A3C2, 0xB77EDEE5, + 0x4D27DBE1, 0x041BA6C6, 0xDF5F21AF, 0x96635C88, + 0xAA7754E2, 0xE34B29C5, 0x380FAEAC, 0x7133D38B, + 0x8B6AD68F, 0xC256ABA8, 0x19122CC1, 0x502E51E6, + 0xE84C5038, 0xA1702D1F, 0x7A34AA76, 0x3308D751, + 0xC951D255, 0x806DAF72, 0x5B29281B, 0x1215553C, + 0x230138CF, 0x6A3D45E8, 0xB179C281, 0xF845BFA6, + 0x021CBAA2, 0x4B20C785, 0x906440EC, 0xD9583DCB, + 0x613A3C15, 0x28064132, 0xF342C65B, 0xBA7EBB7C, + 0x4027BE78, 0x091BC35F, 0xD25F4436, 0x9B633911, + 0xA777317B, 0xEE4B4C5C, 0x350FCB35, 0x7C33B612, + 0x866AB316, 0xCF56CE31, 0x14124958, 0x5D2E347F, + 0xE54C35A1, 0xAC704886, 0x7734CFEF, 0x3E08B2C8, + 0xC451B7CC, 0x8D6DCAEB, 0x56294D82, 0x1F1530A5 + }; +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerCommandRequestPBHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerCommandRequestPBHelper.java index 0b7ae2d7b31e..11d9028f1900 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerCommandRequestPBHelper.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerCommandRequestPBHelper.java @@ -79,9 +79,11 @@ public static Map getAuditParams( auditParams.put("blockData", BlockData.getFromProtoBuf(msg.getPutBlock().getBlockData()) .toString()); - }catch (IOException ex){ - LOG.trace("Encountered error parsing BlockData from protobuf:" - + ex.getMessage()); + } catch (IOException ex){ + if (LOG.isTraceEnabled()) { + LOG.trace("Encountered error parsing BlockData from protobuf: " + + ex.getMessage()); + } return null; } return auditParams; @@ -134,9 +136,11 @@ public static Map getAuditParams( auditParams.put("blockData", BlockData.getFromProtoBuf(msg.getPutSmallFile() .getBlock().getBlockData()).toString()); - }catch (IOException ex){ - LOG.trace("Encountered error parsing BlockData from protobuf:" - + ex.getMessage()); + } catch (IOException ex){ + if (LOG.isTraceEnabled()) { + LOG.trace("Encountered error parsing BlockData from protobuf: " + + ex.getMessage()); + } } return auditParams; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java index 1b7391bf5d87..e2ca455ef0c5 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java @@ -49,7 +49,7 @@ public LeaseCallbackExecutor(T resource, List> callbacks) { @Override public void run() { - if(LOG.isDebugEnabled()) { + if (LOG.isDebugEnabled()) { LOG.debug("Executing callbacks for lease on {}", resource); } for(Callable callback : callbacks) { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java index 756a41af0899..02befaef9804 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java @@ -110,7 +110,7 @@ public synchronized Lease acquire(T resource) public synchronized Lease acquire(T resource, long 
timeout) throws LeaseAlreadyExistException { checkStatus(); - if(LOG.isDebugEnabled()) { + if (LOG.isDebugEnabled()) { LOG.debug("Acquiring lease on {} for {} milliseconds", resource, timeout); } if(activeLeases.containsKey(resource)) { @@ -150,7 +150,7 @@ public Lease get(T resource) throws LeaseNotFoundException { public synchronized void release(T resource) throws LeaseNotFoundException { checkStatus(); - if(LOG.isDebugEnabled()) { + if (LOG.isDebugEnabled()) { LOG.debug("Releasing lease on {}", resource); } Lease lease = activeLeases.remove(resource); @@ -206,7 +206,7 @@ private LeaseMonitor() { @Override public void run() { - while(monitor) { + while (monitor) { LOG.debug("{}-LeaseMonitor: checking for lease expiry", name); long sleepTime = Long.MAX_VALUE; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/ActiveLock.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/ActiveLock.java index 49efad05feb5..95dfd6c393ca 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/ActiveLock.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/ActiveLock.java @@ -31,9 +31,12 @@ public final class ActiveLock { /** * Use ActiveLock#newInstance to create instance. + * + * @param fairness - if true the lock uses a fair ordering policy, else + * non-fair ordering. */ - private ActiveLock() { - this.lock = new ReentrantReadWriteLock(); + private ActiveLock(boolean fairness) { + this.lock = new ReentrantReadWriteLock(fairness); this.count = new AtomicInteger(0); } @@ -42,8 +45,8 @@ private ActiveLock() { * * @return new ActiveLock */ - public static ActiveLock newInstance() { - return new ActiveLock(); + public static ActiveLock newInstance(boolean fairness) { + return new ActiveLock(fairness); } /** diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java index 670d4d16378b..3c2b5d4a394c 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java @@ -37,18 +37,31 @@ public class LockManager { private static final Logger LOG = LoggerFactory.getLogger(LockManager.class); private final Map activeLocks = new ConcurrentHashMap<>(); - private final GenericObjectPool lockPool = - new GenericObjectPool<>(new PooledLockFactory()); + private final GenericObjectPool lockPool; /** - * Creates new LockManager instance with the given Configuration. + * Creates new LockManager instance with the given Configuration.and uses + * non-fair mode for locks. * * @param conf Configuration object */ public LockManager(final Configuration conf) { + this(conf, false); + } + + + /** + * Creates new LockManager instance with the given Configuration. + * + * @param conf Configuration object + * @param fair - true to use fair lock ordering, else non-fair lock ordering. 
+ */ + public LockManager(final Configuration conf, boolean fair) { final int maxPoolSize = conf.getInt( HddsConfigKeys.HDDS_LOCK_MAX_CONCURRENCY, HddsConfigKeys.HDDS_LOCK_MAX_CONCURRENCY_DEFAULT); + lockPool = + new GenericObjectPool<>(new PooledLockFactory(fair)); lockPool.setMaxTotal(maxPoolSize); } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/PooledLockFactory.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/PooledLockFactory.java index 4c24ef74b283..1e3ba05a3a2b 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/PooledLockFactory.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/PooledLockFactory.java @@ -26,9 +26,14 @@ */ public class PooledLockFactory extends BasePooledObjectFactory { + private boolean fairness; + + PooledLockFactory(boolean fair) { + this.fairness = fair; + } @Override public ActiveLock create() throws Exception { - return ActiveLock.newInstance(); + return ActiveLock.newInstance(fairness); } @Override diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/web/utils/JsonUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/web/utils/JsonUtils.java index af56da394cd4..4177b96a354c 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/web/utils/JsonUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/web/utils/JsonUtils.java @@ -43,10 +43,9 @@ private JsonUtils() { // Never constructed } - public static String toJsonStringWithDefaultPrettyPrinter(String jsonString) + public static String toJsonStringWithDefaultPrettyPrinter(Object obj) throws IOException { - Object json = READER.readValue(jsonString); - return WRITTER.writeValueAsString(json); + return WRITTER.writeValueAsString(obj); } public static String toJsonString(Object obj) throws IOException { diff --git a/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto b/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto index 8ea72b6cd178..00e58c0bc851 100644 --- a/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto +++ b/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto @@ -59,7 +59,9 @@ message ScmContainerLocationRequest { optional StartReplicationManagerRequestProto startReplicationManagerRequest = 21; optional StopReplicationManagerRequestProto stopReplicationManagerRequest = 22; optional ReplicationManagerStatusRequestProto seplicationManagerStatusRequest = 23; - + optional DecommissionNodesRequestProto decommissionNodesRequest = 24; + optional RecommissionNodesRequestProto recommissionNodesRequest = 25; + optional StartMaintenanceNodesRequestProto startMaintenanceNodesRequest = 26; } message ScmContainerLocationResponse { @@ -91,6 +93,9 @@ message ScmContainerLocationResponse { optional StartReplicationManagerResponseProto startReplicationManagerResponse = 21; optional StopReplicationManagerResponseProto stopReplicationManagerResponse = 22; optional ReplicationManagerStatusResponseProto replicationManagerStatusResponse = 23; + optional DecommissionNodesResponseProto decommissionNodesResponse = 24; + optional RecommissionNodesResponseProto recommissionNodesResponse = 25; + optional StartMaintenanceNodesResponseProto startMaintenanceNodesResponse = 26; enum Status { OK = 1; CONTAINER_ALREADY_EXISTS = 2; @@ -118,6 +123,9 @@ enum Type { StartReplicationManager = 16; StopReplicationManager = 17; GetReplicationManagerStatus = 18; + DecommissionNodes = 19; + 
RecommissionNodes = 20; + StartMaintenanceNodes = 21; } /** @@ -225,6 +233,40 @@ message NodeQueryResponseProto { repeated Node datanodes = 1; } +/* + Decommission a list of hosts +*/ +message DecommissionNodesRequestProto { + repeated string hosts = 1; +} + +message DecommissionNodesResponseProto { + // empty response +} + +/* + Recommission a list of hosts in maintenance or decommission states +*/ +message RecommissionNodesRequestProto { + repeated string hosts = 1; +} + +message RecommissionNodesResponseProto { + // empty response +} + +/* + Place a list of hosts into maintenance mode +*/ +message StartMaintenanceNodesRequestProto { + repeated string hosts = 1; + optional int64 endInHours = 2; +} + +message StartMaintenanceNodesResponseProto { + // empty response +} + /** Request to create a replication pipeline. */ @@ -326,5 +368,4 @@ message ReplicationManagerStatusResponseProto { */ service StorageContainerLocationProtocolService { rpc submitRequest (ScmContainerLocationRequest) returns (ScmContainerLocationResponse); - } diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 31bc65240d29..b0a59fa209cc 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -1529,6 +1529,17 @@ + + ozone.om.lock.fair + false + If this is true, the Ozone Manager lock will be used in Fair + mode, which will schedule threads in the order received/queued. If this is + false, uses non-fair ordering. See + java.util.concurrent.locks.ReentrantReadWriteLock + for more information on fair/non-fair locks. + + + ozone.om.ratis.enable false diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestContainerCommandRequestMessage.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestContainerCommandRequestMessage.java new file mode 100644 index 000000000000..bbe6ab7cca7a --- /dev/null +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestContainerCommandRequestMessage.java @@ -0,0 +1,152 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
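Assuming the standard protobuf-generated Java builders for the messages defined above (builder method names follow protobuf conventions; imports, outer-class qualification and host names are illustrative and not spelled out in this excerpt), an admin client might assemble the new requests like this:

  DecommissionNodesRequestProto decommission =
      DecommissionNodesRequestProto.newBuilder()
          .addHosts("datanode-1.example.com")
          .addHosts("datanode-2.example.com")
          .build();

  StartMaintenanceNodesRequestProto maintenance =
      StartMaintenanceNodesRequestProto.newBuilder()
          .addHosts("datanode-3.example.com")
          .setEndInHours(24)   // optional automatic end of the maintenance window
          .build();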
+ */ +package org.apache.hadoop.hdds.ratis; + +import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.PutBlockRequestProto; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.PutSmallFileRequestProto; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.WriteChunkRequestProto; +import org.apache.hadoop.ozone.common.Checksum; +import org.apache.hadoop.ozone.common.ChecksumData; +import org.apache.hadoop.ozone.common.OzoneChecksumException; +import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.junit.Assert; +import org.junit.Test; + +import java.util.Random; +import java.util.UUID; +import java.util.function.BiFunction; + +/** Testing {@link ContainerCommandRequestMessage}. */ +public class TestContainerCommandRequestMessage { + static final Random RANDOM = new Random(); + + static ByteString newData(int length, Random random) { + final ByteString.Output out = ByteString.newOutput(); + for(int i = 0; i < length; i++) { + out.write(random.nextInt()); + } + return out.toByteString(); + } + + static ChecksumData checksum(ByteString data) { + try { + return new Checksum().computeChecksum(data.toByteArray()); + } catch (OzoneChecksumException e) { + throw new IllegalStateException(e); + } + } + + static ContainerCommandRequestProto newPutSmallFile( + BlockID blockID, ByteString data) { + final BlockData.Builder blockData + = BlockData.newBuilder() + .setBlockID(blockID.getDatanodeBlockIDProtobuf()); + final PutBlockRequestProto.Builder putBlockRequest + = PutBlockRequestProto.newBuilder() + .setBlockData(blockData); + final KeyValue keyValue = KeyValue.newBuilder() + .setKey("OverWriteRequested") + .setValue("true") + .build(); + final ChunkInfo chunk = ChunkInfo.newBuilder() + .setChunkName(blockID.getLocalID() + "_chunk") + .setOffset(0) + .setLen(data.size()) + .addMetadata(keyValue) + .setChecksumData(checksum(data).getProtoBufMessage()) + .build(); + final PutSmallFileRequestProto putSmallFileRequest + = PutSmallFileRequestProto.newBuilder() + .setChunkInfo(chunk) + .setBlock(putBlockRequest) + .setData(data) + .build(); + return ContainerCommandRequestProto.newBuilder() + .setCmdType(Type.PutSmallFile) + .setContainerID(blockID.getContainerID()) + .setDatanodeUuid(UUID.randomUUID().toString()) + .setPutSmallFile(putSmallFileRequest) + .build(); + } + + static ContainerCommandRequestProto newWriteChunk( + BlockID blockID, ByteString data) { + final ChunkInfo chunk = ChunkInfo.newBuilder() + .setChunkName(blockID.getLocalID() + "_chunk_" + 1) + .setOffset(0) + .setLen(data.size()) + .setChecksumData(checksum(data).getProtoBufMessage()) + .build(); + + final WriteChunkRequestProto.Builder writeChunkRequest + = WriteChunkRequestProto.newBuilder() + .setBlockID(blockID.getDatanodeBlockIDProtobuf()) + .setChunkData(chunk) + .setData(data); + return ContainerCommandRequestProto.newBuilder() + .setCmdType(Type.WriteChunk) + .setContainerID(blockID.getContainerID()) + .setDatanodeUuid(UUID.randomUUID().toString()) + .setWriteChunk(writeChunkRequest) + .build(); 
+ } + + @Test + public void testPutSmallFile() throws Exception { + runTest(TestContainerCommandRequestMessage::newPutSmallFile); + } + + @Test + public void testWriteChunk() throws Exception { + runTest(TestContainerCommandRequestMessage::newWriteChunk); + } + + static void runTest( + BiFunction method) + throws Exception { + for(int i = 0; i < 2; i++) { + runTest(i, method); + } + for(int i = 2; i < 1 << 10;) { + runTest(i + 1 + RANDOM.nextInt(i - 1), method); + i <<= 1; + runTest(i, method); + } + } + + static void runTest(int length, + BiFunction method) + throws Exception { + System.out.println("length=" + length); + final BlockID blockID = new BlockID(RANDOM.nextLong(), RANDOM.nextLong()); + final ByteString data = newData(length, RANDOM); + + final ContainerCommandRequestProto original = method.apply(blockID, data); + final ContainerCommandRequestMessage message + = ContainerCommandRequestMessage.toMessage(original, null); + final ContainerCommandRequestProto computed + = ContainerCommandRequestMessage.toProto(message.getContent(), null); + Assert.assertEquals(original, computed); + } +} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java new file mode 100644 index 000000000000..2f466377b4b2 --- /dev/null +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java @@ -0,0 +1,102 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.common; + +import org.apache.hadoop.util.PureJavaCrc32; +import org.apache.hadoop.util.PureJavaCrc32C; +import org.junit.Assert; +import org.junit.Test; + +import java.nio.charset.StandardCharsets; +import java.util.Random; +import java.util.zip.Checksum; + +/** + * Test {@link ChecksumByteBuffer} implementations. 
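For reference, the PureJavaCrc32 used as the expected implementation in the new TestChecksumByteBuffer below computes the same standard CRC-32 as java.util.zip.CRC32, so its values can be cross-checked against the JDK. A small sketch, not part of the patch:

  import java.nio.charset.StandardCharsets;
  import java.util.zip.CRC32;

  public class CrcCrossCheck {
    public static void main(String[] args) {
      CRC32 crc = new CRC32();
      crc.update("hello world!".getBytes(StandardCharsets.UTF_8));
      // A correct PureJavaCrc32 / PureJavaCrc32ByteBuffer must report the same value.
      System.out.println(Long.toHexString(crc.getValue()));
    }
  }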
+ */ +public class TestChecksumByteBuffer { + @Test + public void testPureJavaCrc32ByteBuffer() { + final Checksum expected = new PureJavaCrc32(); + final ChecksumByteBuffer testee = new PureJavaCrc32ByteBuffer(); + new VerifyChecksumByteBuffer(expected, testee).testCorrectness(); + } + + @Test + public void testPureJavaCrc32CByteBuffer() { + final Checksum expected = new PureJavaCrc32C(); + final ChecksumByteBuffer testee = new PureJavaCrc32CByteBuffer(); + new VerifyChecksumByteBuffer(expected, testee).testCorrectness(); + } + + static class VerifyChecksumByteBuffer { + private final Checksum expected; + private final ChecksumByteBuffer testee; + + VerifyChecksumByteBuffer(Checksum expected, ChecksumByteBuffer testee) { + this.expected = expected; + this.testee = testee; + } + + void testCorrectness() { + checkSame(); + + checkBytes("hello world!".getBytes(StandardCharsets.UTF_8)); + + final Random random = new Random(); + final byte[] bytes = new byte[1 << 10]; + for (int i = 0; i < 1000; i++) { + random.nextBytes(bytes); + checkBytes(bytes, random.nextInt(bytes.length)); + } + } + + void checkBytes(byte[] bytes) { + checkBytes(bytes, bytes.length); + } + + void checkBytes(byte[] bytes, int length) { + expected.reset(); + testee.reset(); + checkSame(); + + for (byte b : bytes) { + expected.update(b); + testee.update(b); + checkSame(); + } + + expected.reset(); + testee.reset(); + + for (int i = 0; i < length; i++) { + expected.update(bytes, 0, i); + testee.update(bytes, 0, i); + checkSame(); + } + + expected.reset(); + testee.reset(); + checkSame(); + } + + private void checkSame() { + Assert.assertEquals(expected.getValue(), testee.getValue()); + } + } +} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java index 784f56c29f2d..41415ebe0ac3 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java @@ -32,6 +32,7 @@ import org.slf4j.LoggerFactory; import java.io.IOException; +import java.util.ArrayList; import java.util.Iterator; import java.util.Set; import java.util.List; @@ -40,7 +41,6 @@ import java.util.concurrent.ConcurrentNavigableMap; import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.ConcurrentSkipListSet; -import java.util.stream.Collectors; /** @@ -50,24 +50,26 @@ public class ContainerSet { private static final Logger LOG = LoggerFactory.getLogger(ContainerSet.class); - private final ConcurrentSkipListMap containerMap = new + private final ConcurrentSkipListMap> containerMap = new ConcurrentSkipListMap<>(); private final ConcurrentSkipListSet missingContainerSet = new ConcurrentSkipListSet<>(); /** * Add Container to container map. 
- * @param container + * @param container container to be added * @return If container is added to containerMap returns true, otherwise * false */ - public boolean addContainer(Container container) throws + public boolean addContainer(Container container) throws StorageContainerException { Preconditions.checkNotNull(container, "container cannot be null"); long containerId = container.getContainerData().getContainerID(); - if(containerMap.putIfAbsent(containerId, container) == null) { - LOG.debug("Container with container Id {} is added to containerMap", - containerId); + if (containerMap.putIfAbsent(containerId, container) == null) { + if (LOG.isDebugEnabled()) { + LOG.debug("Container with container Id {} is added to containerMap", + containerId); + } // wish we could have done this from ContainerData.setState container.getContainerData().commitSpace(); return true; @@ -81,10 +83,10 @@ public boolean addContainer(Container container) throws /** * Returns the Container with specified containerId. - * @param containerId + * @param containerId ID of the container to get * @return Container */ - public Container getContainer(long containerId) { + public Container getContainer(long containerId) { Preconditions.checkState(containerId >= 0, "Container Id cannot be negative."); return containerMap.get(containerId); @@ -92,15 +94,15 @@ public Container getContainer(long containerId) { /** * Removes the Container matching with specified containerId. - * @param containerId + * @param containerId ID of the container to remove * @return If container is removed from containerMap returns true, otherwise * false */ public boolean removeContainer(long containerId) { Preconditions.checkState(containerId >= 0, "Container Id cannot be negative."); - Container removed = containerMap.remove(containerId); - if(removed == null) { + Container removed = containerMap.remove(containerId); + if (removed == null) { LOG.debug("Container with containerId {} is not present in " + "containerMap", containerId); return false; @@ -122,9 +124,9 @@ public int containerCount() { /** * Return an container Iterator over {@link ContainerSet#containerMap}. - * @return {@literal Iterator} + * @return {@literal Iterator>} */ - public Iterator getContainerIterator() { + public Iterator> getContainerIterator() { return containerMap.values().iterator(); } @@ -132,26 +134,23 @@ public Iterator getContainerIterator() { * Return an iterator of containers associated with the specified volume. * * @param volume the HDDS volume which should be used to filter containers - * @return {@literal Iterator} + * @return {@literal Iterator>} */ - public Iterator getContainerIterator(HddsVolume volume) { + public Iterator> getContainerIterator(HddsVolume volume) { Preconditions.checkNotNull(volume); Preconditions.checkNotNull(volume.getStorageID()); String volumeUuid = volume.getStorageID(); - return containerMap.values() - .stream() - .filter(x -> volumeUuid.equals( - x.getContainerData().getVolume() - .getStorageID())) - .iterator(); + return containerMap.values().stream() + .filter(x -> volumeUuid.equals(x.getContainerData().getVolume() + .getStorageID())) + .iterator(); } /** * Return an containerMap iterator over {@link ContainerSet#containerMap}. 
* @return containerMap Iterator */ - public Iterator> getContainerMapIterator() { - containerMap.keySet().stream().collect(Collectors.toSet()); + public Iterator>> getContainerMapIterator() { return containerMap.entrySet().iterator(); } @@ -160,11 +159,11 @@ public Iterator> getContainerMapIterator() { * @return containerMap */ @VisibleForTesting - public Map getContainerMapCopy() { + public Map> getContainerMapCopy() { return ImmutableMap.copyOf(containerMap); } - public Map getContainerMap() { + public Map> getContainerMap() { return Collections.unmodifiableMap(containerMap); } @@ -179,7 +178,6 @@ public Map getContainerMap() { * @param startContainerId - Return containers with Id >= startContainerId. * @param count - how many to return * @param data - Actual containerData - * @throws StorageContainerException */ public void listContainer(long startContainerId, long count, List data) throws @@ -193,14 +191,14 @@ public void listContainer(long startContainerId, long count, "must be positive"); LOG.debug("listContainer returns containerData starting from {} of count " + "{}", startContainerId, count); - ConcurrentNavigableMap map; + ConcurrentNavigableMap> map; if (startContainerId == 0) { map = containerMap.tailMap(containerMap.firstKey(), true); } else { map = containerMap.tailMap(startContainerId, true); } int currentCount = 0; - for (Container entry : map.values()) { + for (Container entry : map.values()) { if (currentCount < count) { data.add(entry.getContainerData()); currentCount++; @@ -214,7 +212,6 @@ public void listContainer(long startContainerId, long count, * Get container report. * * @return The container report. - * @throws IOException */ public ContainerReportsProto getContainerReport() throws IOException { LOG.debug("Starting container report iteration."); @@ -222,13 +219,12 @@ public ContainerReportsProto getContainerReport() throws IOException { // No need for locking since containerMap is a ConcurrentSkipListMap // And we can never get the exact state since close might happen // after we iterate a point. 
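The listContainer logic above depends on ConcurrentSkipListMap's inclusive tailMap view to start iteration at the requested container id. A minimal illustration with plain JDK types, not part of the patch:

  import java.util.concurrent.ConcurrentNavigableMap;
  import java.util.concurrent.ConcurrentSkipListMap;

  public class TailMapExample {
    public static void main(String[] args) {
      ConcurrentSkipListMap<Long, String> containers = new ConcurrentSkipListMap<>();
      containers.put(5L, "container-5");
      containers.put(7L, "container-7");
      containers.put(9L, "container-9");

      // Inclusive tail view starting at the requested id, as listContainer does.
      ConcurrentNavigableMap<Long, String> tail = containers.tailMap(7L, true);
      System.out.println(tail.keySet());   // [7, 9]
    }
  }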
- List containers = containerMap.values().stream().collect( - Collectors.toList()); + List> containers = new ArrayList<>(containerMap.values()); ContainerReportsProto.Builder crBuilder = ContainerReportsProto.newBuilder(); - for (Container container: containers) { + for (Container container: containers) { crBuilder.addReports(container.getContainerReport()); } @@ -257,7 +253,7 @@ public void buildMissingContainerSetAndValidate( LOG.warn("Adding container {} to missing container set.", id); missingContainerSet.add(id); } else { - Container container = containerMap.get(id); + Container container = containerMap.get(id); long containerBCSID = container.getBlockCommitSequenceId(); long snapshotBCSID = mapEntry.getValue(); if (containerBCSID < snapshotBCSID) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java index 37e19bc28b5f..76f6b3cd2f18 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java @@ -155,8 +155,10 @@ public ContainerCommandResponseProto dispatch( private ContainerCommandResponseProto dispatchRequest( ContainerCommandRequestProto msg, DispatcherContext dispatcherContext) { Preconditions.checkNotNull(msg); - LOG.trace("Command {}, trace ID: {} ", msg.getCmdType().toString(), - msg.getTraceID()); + if (LOG.isTraceEnabled()) { + LOG.trace("Command {}, trace ID: {} ", msg.getCmdType().toString(), + msg.getTraceID()); + } AuditAction action = ContainerCommandRequestPBHelper.getAuditAction( msg.getCmdType()); @@ -553,7 +555,10 @@ private void audit(AuditAction action, EventType eventType, } break; - default: LOG.debug("Invalid audit event status - " + result); + default: + if (LOG.isDebugEnabled()) { + LOG.debug("Invalid audit event status - " + result); + } } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/RandomContainerDeletionChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/RandomContainerDeletionChoosingPolicy.java index 5c6c319600e5..4dde3d6cb719 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/RandomContainerDeletionChoosingPolicy.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/RandomContainerDeletionChoosingPolicy.java @@ -55,11 +55,12 @@ public List chooseContainerForBlockDeletion(int count, if (currentCount < count) { result.add(entry); currentCount++; - - LOG.debug("Select container {} for block deletion, " - + "pending deletion blocks num: {}.", - entry.getContainerID(), - ((KeyValueContainerData)entry).getNumPendingDeletionBlocks()); + if (LOG.isDebugEnabled()) { + LOG.debug("Select container {} for block deletion, " + + "pending deletion blocks num: {}.", + entry.getContainerID(), + ((KeyValueContainerData) entry).getNumPendingDeletionBlocks()); + } } else { break; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/TopNOrderedContainerDeletionChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/TopNOrderedContainerDeletionChoosingPolicy.java index b17680c41983..41fc26716c19 
100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/TopNOrderedContainerDeletionChoosingPolicy.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/TopNOrderedContainerDeletionChoosingPolicy.java @@ -69,12 +69,13 @@ public List chooseContainerForBlockDeletion(int count, if (entry.getNumPendingDeletionBlocks() > 0) { result.add(entry); currentCount++; - - LOG.debug( - "Select container {} for block deletion, " - + "pending deletion blocks num: {}.", - entry.getContainerID(), - entry.getNumPendingDeletionBlocks()); + if (LOG.isDebugEnabled()) { + LOG.debug( + "Select container {} for block deletion, " + + "pending deletion blocks num: {}.", + entry.getContainerID(), + entry.getNumPendingDeletionBlocks()); + } } else { LOG.debug("Stop looking for next container, there is no" + " pending deletion block contained in remaining containers."); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java index a243c93ed362..f0064ec5d740 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java @@ -207,7 +207,9 @@ public void logIfNeeded(Exception ex) { TimeUnit.MILLISECONDS.toSeconds( this.getMissedCount() * getScmHeartbeatInterval(this.conf)), ex); } - LOG.trace("Incrementing the Missed count. Ex : {}", ex); + if (LOG.isTraceEnabled()) { + LOG.trace("Incrementing the Missed count. 
Ex : {}", ex); + } this.incMissed(); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java index 881fea0c7387..2dec08fe83c4 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java @@ -114,8 +114,10 @@ public void handle(SCMCommand command, OzoneContainer ozoneContainer, break; case UNHEALTHY: case INVALID: - LOG.debug("Cannot close the container #{}, the container is" - + " in {} state.", containerId, container.getContainerState()); + if (LOG.isDebugEnabled()) { + LOG.debug("Cannot close the container #{}, the container is" + + " in {} state.", containerId, container.getContainerState()); + } default: break; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java index c077e75862f8..cdecf5d7ed47 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java @@ -196,9 +196,11 @@ private void deleteKeyValueContainerBlocks( } if (delTX.getTxID() < containerData.getDeleteTransactionId()) { - LOG.debug(String.format("Ignoring delete blocks for containerId: %d." - + " Outdated delete transactionId %d < %d", containerId, - delTX.getTxID(), containerData.getDeleteTransactionId())); + if (LOG.isDebugEnabled()) { + LOG.debug(String.format("Ignoring delete blocks for containerId: %d." + + " Outdated delete transactionId %d < %d", containerId, + delTX.getTxID(), containerData.getDeleteTransactionId())); + } return; } @@ -216,9 +218,11 @@ private void deleteKeyValueContainerBlocks( DFSUtil.string2Bytes(OzoneConsts.DELETED_KEY_PREFIX + blk); if (containerDB.getStore().get(deletingKeyBytes) != null || containerDB.getStore().get(deletedKeyBytes) != null) { - LOG.debug(String.format( - "Ignoring delete for block %d in container %d." - + " Entry already added.", blk, containerId)); + if (LOG.isDebugEnabled()) { + LOG.debug(String.format( + "Ignoring delete for block %d in container %d." 
+ + " Entry already added.", blk, containerId)); + } continue; } // Found the block in container db, @@ -228,8 +232,10 @@ private void deleteKeyValueContainerBlocks( try { containerDB.getStore().writeBatch(batch); newDeletionBlocks++; - LOG.debug("Transited Block {} to DELETING state in container {}", - blk, containerId); + if (LOG.isDebugEnabled()) { + LOG.debug("Transited Block {} to DELETING state in container {}", + blk, containerId); + } } catch (IOException e) { // if some blocks failed to delete, we fail this TX, // without sending this ACK to SCM, SCM will resend the TX @@ -238,8 +244,10 @@ private void deleteKeyValueContainerBlocks( "Failed to delete blocks for TXID = " + delTX.getTxID(), e); } } else { - LOG.debug("Block {} not found or already under deletion in" - + " container {}, skip deleting it.", blk, containerId); + if (LOG.isDebugEnabled()) { + LOG.debug("Block {} not found or already under deletion in" + + " container {}, skip deleting it.", blk, containerId); + } } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java index 7b638a3cd8cb..b89ec730f7c3 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.ratis.ContainerCommandRequestMessage; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; @@ -313,7 +314,7 @@ public TransactionContext startTransaction(RaftClientRequest request) throws IOException { long startTime = Time.monotonicNowNanos(); final ContainerCommandRequestProto proto = - getContainerCommandRequestProto(request.getMessage().getContent()); + message2ContainerCommandRequestProto(request.getMessage()); Preconditions.checkArgument(request.getRaftGroupId().equals(gid)); try { dispatcher.validateContainerCommand(proto); @@ -363,7 +364,7 @@ public TransactionContext startTransaction(RaftClientRequest request) .setStateMachine(this) .setServerRole(RaftPeerRole.LEADER) .setStateMachineContext(startTime) - .setLogData(request.getMessage().getContent()) + .setLogData(proto.toByteString()) .build(); } @@ -383,11 +384,18 @@ private ContainerCommandRequestProto getContainerCommandRequestProto( .setPipelineID(gid.getUuid().toString()).build(); } + private ContainerCommandRequestProto message2ContainerCommandRequestProto( + Message message) throws InvalidProtocolBufferException { + return ContainerCommandRequestMessage.toProto(message.getContent(), gid); + } + private ContainerCommandResponseProto dispatchCommand( ContainerCommandRequestProto requestProto, DispatcherContext context) { - LOG.trace("{}: dispatch {} containerID={} pipelineID={} traceID={}", gid, - requestProto.getCmdType(), requestProto.getContainerID(), - requestProto.getPipelineID(), requestProto.getTraceID()); + if (LOG.isTraceEnabled()) { + LOG.trace("{}: dispatch {} containerID={} pipelineID={} traceID={}", gid, + 
requestProto.getCmdType(), requestProto.getContainerID(), + requestProto.getPipelineID(), requestProto.getTraceID()); + } if (isBlockTokenEnabled) { try { // ServerInterceptors intercepts incoming request and creates ugi. @@ -403,7 +411,9 @@ private ContainerCommandResponseProto dispatchCommand( } ContainerCommandResponseProto response = dispatcher.dispatch(requestProto, context); - LOG.trace("{}: response {}", gid, response); + if (LOG.isTraceEnabled()) { + LOG.trace("{}: response {}", gid, response); + } return response; } @@ -456,9 +466,11 @@ private CompletableFuture handleWriteChunk( }, chunkExecutor); writeChunkFutureMap.put(entryIndex, writeChunkFuture); - LOG.debug(gid + ": writeChunk writeStateMachineData : blockId " + - write.getBlockID() + " logIndex " + entryIndex + " chunkName " - + write.getChunkData().getChunkName()); + if (LOG.isDebugEnabled()) { + LOG.debug(gid + ": writeChunk writeStateMachineData : blockId " + + write.getBlockID() + " logIndex " + entryIndex + " chunkName " + + write.getChunkData().getChunkName()); + } // Remove the future once it finishes execution from the // writeChunkFutureMap. writeChunkFuture.thenApply(r -> { @@ -474,10 +486,12 @@ private CompletableFuture handleWriteChunk( } else { metrics.incNumBytesWrittenCount( requestProto.getWriteChunk().getChunkData().getLen()); - LOG.debug(gid + - ": writeChunk writeStateMachineData completed: blockId" + - write.getBlockID() + " logIndex " + entryIndex + " chunkName " + - write.getChunkData().getChunkName()); + if (LOG.isDebugEnabled()) { + LOG.debug(gid + + ": writeChunk writeStateMachineData completed: blockId" + + write.getBlockID() + " logIndex " + entryIndex + " chunkName " + + write.getChunkData().getChunkName()); + } raftFuture.complete(r::toByteString); metrics.recordWriteStateMachineCompletion( Time.monotonicNowNanos() - startTime); @@ -530,7 +544,7 @@ public CompletableFuture query(Message request) { try { metrics.incNumQueryStateMachineOps(); final ContainerCommandRequestProto requestProto = - getContainerCommandRequestProto(request.getContent()); + message2ContainerCommandRequestProto(request); return CompletableFuture .completedFuture(runCommand(requestProto, null)::toByteString); } catch (IOException e) { @@ -755,10 +769,12 @@ public CompletableFuture applyTransaction(TransactionContext trx) { stateMachineHealthy.compareAndSet(true, false); ratisServer.handleApplyTransactionFailure(gid, trx.getServerRole()); } else { - LOG.debug( - "gid {} : ApplyTransaction completed. cmd {} logIndex {} msg : " - + "{} Container Result: {}", gid, r.getCmdType(), index, - r.getMessage(), r.getResult()); + if (LOG.isDebugEnabled()) { + LOG.debug( + "gid {} : ApplyTransaction completed. 
cmd {} logIndex {} msg : " + + "{} Container Result: {}", gid, r.getCmdType(), index, + r.getMessage(), r.getResult()); + } applyTransactionFuture.complete(r::toByteString); if (cmdType == Type.WriteChunk || cmdType == Type.PutSmallFile) { metrics.incNumBytesCommittedCount( diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java index 179547b8444f..80e91cdf55de 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ClosePipelineInfo; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineAction; +import org.apache.hadoop.hdds.ratis.ContainerCommandRequestMessage; import org.apache.hadoop.hdds.scm.HddsServerUtil; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.security.x509.SecurityConfig; @@ -516,8 +517,8 @@ private RaftClientRequest createRaftClientRequest( RaftClientRequest.Type type) { return new RaftClientRequest(clientId, server.getId(), RaftGroupId.valueOf(PipelineID.getFromProtobuf(pipelineID).getId()), - nextCallId(), Message.valueOf(request.toByteString()), type, - null); + nextCallId(), ContainerCommandRequestMessage.toMessage(request, null), + type, null); } private GroupInfoRequest createGroupInfoRequest( diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolumeChecker.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolumeChecker.java index 75dcddf26d8b..800789f6e0e7 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolumeChecker.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolumeChecker.java @@ -181,10 +181,12 @@ public Set checkAllVolumes(Collection volumes) final long gap = timer.monotonicNow() - lastAllVolumesCheck; if (gap < minDiskCheckGapMs) { numSkippedChecks.incrementAndGet(); - LOG.trace( - "Skipped checking all volumes, time since last check {} is less " + - "than the minimum gap between checks ({} ms).", - gap, minDiskCheckGapMs); + if (LOG.isTraceEnabled()) { + LOG.trace( + "Skipped checking all volumes, time since last check {} is less " + + "than the minimum gap between checks ({} ms).", + gap, minDiskCheckGapMs); + } return Collections.emptySet(); } @@ -314,7 +316,9 @@ public void onSuccess(@Nonnull VolumeCheckResult result) { switch (result) { case HEALTHY: case DEGRADED: - LOG.debug("Volume {} is {}.", volume, result); + if (LOG.isDebugEnabled()) { + LOG.debug("Volume {} is {}.", volume, result); + } markHealthy(); break; case FAILED: diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/ThrottledAsyncChecker.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/ThrottledAsyncChecker.java index c8f3b1de9e31..836fdf3e3951 
100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/ThrottledAsyncChecker.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/ThrottledAsyncChecker.java @@ -129,9 +129,11 @@ public Optional> schedule( completedChecks.get(target); final long msSinceLastCheck = timer.monotonicNow() - result.completedAt; if (msSinceLastCheck < minMsBetweenChecks) { - LOG.debug("Skipped checking {}. Time since last check {}ms " + - "is less than the min gap {}ms.", - target, msSinceLastCheck, minMsBetweenChecks); + if (LOG.isDebugEnabled()) { + LOG.debug("Skipped checking {}. Time since last check {}ms " + + "is less than the min gap {}ms.", + target, msSinceLastCheck, minMsBetweenChecks); + } return Optional.empty(); } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java index 3cccdc1bd0c1..ad68c4dc96cd 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java @@ -128,8 +128,10 @@ public boolean hasNext() throws IOException { KeyValue block = blockIterator.next(); if (blockFilter.filterKey(null, block.getKey(), null)) { nextBlock = BlockUtils.getBlockData(block.getValue()); - LOG.trace("Block matching with filter found: blockID is : {} for " + - "containerID {}", nextBlock.getLocalID(), containerId); + if (LOG.isTraceEnabled()) { + LOG.trace("Block matching with filter found: blockID is : {} for " + + "containerID {}", nextBlock.getLocalID(), containerId); + } return true; } hasNext(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java index d2b26f9bd8af..a4bd37623113 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java @@ -110,7 +110,7 @@ public boolean fastCheck() { * @return true : integrity checks pass, false : otherwise. 
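A recurring change in this patch wraps debug/trace logging in isDebugEnabled()/isTraceEnabled() guards. SLF4J's {} placeholders already defer string formatting, so the guard mainly pays off when a log argument is itself expensive to compute, as with the pending-deletion counts in the deletion-policy hunks. A stand-alone sketch of the idiom; the helper method is hypothetical:

  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;

  public class GuardedLoggingExample {
    private static final Logger LOG =
        LoggerFactory.getLogger(GuardedLoggingExample.class);

    // Stands in for a costly call such as getNumPendingDeletionBlocks().
    static long expensivePendingBlockCount() {
      return 42L;
    }

    public static void main(String[] args) {
      // Without the guard the argument is computed even when DEBUG is disabled.
      if (LOG.isDebugEnabled()) {
        LOG.debug("Pending deletion blocks: {}", expensivePendingBlockCount());
      }
    }
  }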
*/ public boolean fullCheck(DataTransferThrottler throttler, Canceler canceler) { - boolean valid = false; + boolean valid; try { valid = fastCheck(); @@ -141,7 +141,7 @@ private void checkLayout() throws IOException { private void checkDirPath(String path) throws IOException { File dirPath = new File(path); - String errStr = null; + String errStr; try { if (!dirPath.isDirectory()) { @@ -162,7 +162,7 @@ private void checkDirPath(String path) throws IOException { } private void checkContainerFile() throws IOException { - /** + /* * compare the values in the container file loaded from disk, * with the values we are expecting */ @@ -193,10 +193,10 @@ private void checkContainerFile() throws IOException { } KeyValueContainerData kvData = onDiskContainerData; - if (!metadataPath.toString().equals(kvData.getMetadataPath())) { + if (!metadataPath.equals(kvData.getMetadataPath())) { String errStr = "Bad metadata path in Containerdata for " + containerID + "Expected [" - + metadataPath.toString() + "] Got [" + kvData.getMetadataPath() + + metadataPath + "] Got [" + kvData.getMetadataPath() + "]"; throw new IOException(errStr); } @@ -204,15 +204,12 @@ private void checkContainerFile() throws IOException { private void scanData(DataTransferThrottler throttler, Canceler canceler) throws IOException { - /** + /* * Check the integrity of the DB inside each container. - * In Scope: * 1. iterate over each key (Block) and locate the chunks for the block - * 2. garbage detection : chunks which exist in the filesystem, - * but not in the DB. This function is implemented as HDDS-1202 - * Not in scope: - * 1. chunk checksum verification. this is left to a separate - * slow chunk scanner + * 2. garbage detection (TBD): chunks which exist in the filesystem, + * but not in the DB. This function will be implemented in HDDS-1202 + * 3. chunk checksum verification. 
*/ Preconditions.checkState(onDiskContainerData != null, "invoke loadContainerData prior to calling this function"); @@ -255,21 +252,20 @@ private void scanData(DataTransferThrottler throttler, Canceler canceler) chunk.getChecksumData().getType(), chunk.getChecksumData().getBytesPerChecksum(), chunk.getChecksumData().getChecksumsList()); + Checksum cal = new Checksum(cData.getChecksumType(), + cData.getBytesPerChecksum()); long bytesRead = 0; byte[] buffer = new byte[cData.getBytesPerChecksum()]; try (InputStream fs = new FileInputStream(chunkFile)) { - int i = 0, v = 0; - for (; i < length; i++) { - v = fs.read(buffer); + for (int i = 0; i < length; i++) { + int v = fs.read(buffer); if (v == -1) { break; } bytesRead += v; throttler.throttle(v, canceler); - Checksum cal = new Checksum(cData.getChecksumType(), - cData.getBytesPerChecksum()); ByteString expected = cData.getChecksums().get(i); - ByteString actual = cal.computeChecksum(buffer) + ByteString actual = cal.computeChecksum(buffer, 0, v) .getChecksums().get(0); if (!Arrays.equals(expected.toByteArray(), actual.toByteArray())) { @@ -283,7 +279,7 @@ private void scanData(DataTransferThrottler throttler, Canceler canceler) } } - if (v == -1 && i < length) { + if (bytesRead != chunk.getLen()) { throw new OzoneChecksumException(String .format("Inconsistent read for chunk=%s expected length=%d" + " actual length=%d for block %s", diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java index f39973fbacda..bc418839f28b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java @@ -27,6 +27,7 @@ import java.util.List; import java.util.Map; import java.util.concurrent.locks.ReentrantLock; +import java.util.function.Function; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.StorageUnit; @@ -46,7 +47,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos .PutSmallFileRequestProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type; -import org.apache.hadoop.hdds.scm.ByteStringHelper; +import org.apache.hadoop.hdds.scm.ByteStringConversion; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.common.helpers .StorageContainerException; @@ -102,6 +103,7 @@ public class KeyValueHandler extends Handler { private final ChunkManager chunkManager; private final VolumeChoosingPolicy volumeChoosingPolicy; private final long maxContainerSize; + private final Function byteBufferToByteString; // A lock that is held during container creation. private final AutoCloseableLock containerCreationLock; @@ -125,10 +127,8 @@ public KeyValueHandler(Configuration config, StateContext context, // this handler lock is used for synchronizing createContainer Requests, // so using a fair lock here. 
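The reworked scanData loop above reuses a single Checksum instance, hashes only the bytes actually read from each range, and reports an inconsistent read when the total falls short of the recorded chunk length. A simplified, self-contained sketch of the same pattern, using java.util.zip.CRC32 in place of Ozone's Checksum; names and the expected-value encoding are illustrative:

  import java.io.FileInputStream;
  import java.io.IOException;
  import java.io.InputStream;
  import java.util.zip.CRC32;

  public class ChunkScanSketch {
    // Returns true if every per-range checksum matches and the full length was read.
    static boolean verify(String chunkFile, long[] expected, int bytesPerChecksum,
                          long expectedLength) throws IOException {
      CRC32 crc = new CRC32();
      byte[] buffer = new byte[bytesPerChecksum];
      long bytesRead = 0;
      try (InputStream in = new FileInputStream(chunkFile)) {
        for (int i = 0; i < expected.length; i++) {
          int v = in.read(buffer);
          if (v == -1) {
            break;                        // fewer ranges on disk than recorded
          }
          bytesRead += v;
          crc.reset();
          crc.update(buffer, 0, v);       // hash only the bytes actually read
          if (crc.getValue() != expected[i]) {
            return false;                 // corrupt range
          }
        }
      }
      return bytesRead == expectedLength; // detects truncated chunk files
    }
  }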
containerCreationLock = new AutoCloseableLock(new ReentrantLock(true)); - boolean isUnsafeByteOperationsEnabled = conf.getBoolean( - OzoneConfigKeys.OZONE_UNSAFEBYTEOPERATIONS_ENABLED, - OzoneConfigKeys.OZONE_UNSAFEBYTEOPERATIONS_ENABLED_DEFAULT); - ByteStringHelper.init(isUnsafeByteOperationsEnabled); + byteBufferToByteString = + ByteStringConversion.createByteBufferConversion(conf); } @VisibleForTesting @@ -206,8 +206,10 @@ public BlockManager getBlockManager() { ContainerCommandResponseProto handleCreateContainer( ContainerCommandRequestProto request, KeyValueContainer kvContainer) { if (!request.hasCreateContainer()) { - LOG.debug("Malformed Create Container request. trace ID: {}", - request.getTraceID()); + if (LOG.isDebugEnabled()) { + LOG.debug("Malformed Create Container request. trace ID: {}", + request.getTraceID()); + } return ContainerUtils.malformedRequest(request); } // Create Container request should be passed a null container as the @@ -269,8 +271,10 @@ public void populateContainerPathFields(KeyValueContainer container, ContainerCommandResponseProto handleReadContainer( ContainerCommandRequestProto request, KeyValueContainer kvContainer) { if (!request.hasReadContainer()) { - LOG.debug("Malformed Read Container request. trace ID: {}", - request.getTraceID()); + if (LOG.isDebugEnabled()) { + LOG.debug("Malformed Read Container request. trace ID: {}", + request.getTraceID()); + } return ContainerUtils.malformedRequest(request); } @@ -296,8 +300,10 @@ ContainerCommandResponseProto handleUpdateContainer( ContainerCommandRequestProto request, KeyValueContainer kvContainer) { if (!request.hasUpdateContainer()) { - LOG.debug("Malformed Update Container request. trace ID: {}", - request.getTraceID()); + if (LOG.isDebugEnabled()) { + LOG.debug("Malformed Update Container request. trace ID: {}", + request.getTraceID()); + } return ContainerUtils.malformedRequest(request); } @@ -330,8 +336,10 @@ ContainerCommandResponseProto handleDeleteContainer( ContainerCommandRequestProto request, KeyValueContainer kvContainer) { if (!request.hasDeleteContainer()) { - LOG.debug("Malformed Delete container request. trace ID: {}", - request.getTraceID()); + if (LOG.isDebugEnabled()) { + LOG.debug("Malformed Delete container request. trace ID: {}", + request.getTraceID()); + } return ContainerUtils.malformedRequest(request); } @@ -352,8 +360,10 @@ ContainerCommandResponseProto handleCloseContainer( ContainerCommandRequestProto request, KeyValueContainer kvContainer) { if (!request.hasCloseContainer()) { - LOG.debug("Malformed Update Container request. trace ID: {}", - request.getTraceID()); + if (LOG.isDebugEnabled()) { + LOG.debug("Malformed Update Container request. trace ID: {}", + request.getTraceID()); + } return ContainerUtils.malformedRequest(request); } try { @@ -379,8 +389,10 @@ ContainerCommandResponseProto handlePutBlock( long blockLength; if (!request.hasPutBlock()) { - LOG.debug("Malformed Put Key request. trace ID: {}", - request.getTraceID()); + if (LOG.isDebugEnabled()) { + LOG.debug("Malformed Put Key request. trace ID: {}", + request.getTraceID()); + } return ContainerUtils.malformedRequest(request); } @@ -415,8 +427,10 @@ ContainerCommandResponseProto handleGetBlock( ContainerCommandRequestProto request, KeyValueContainer kvContainer) { if (!request.hasGetBlock()) { - LOG.debug("Malformed Get Key request. trace ID: {}", - request.getTraceID()); + if (LOG.isDebugEnabled()) { + LOG.debug("Malformed Get Key request. 
trace ID: {}", + request.getTraceID()); + } return ContainerUtils.malformedRequest(request); } @@ -454,8 +468,10 @@ ContainerCommandResponseProto handleGetBlock( ContainerCommandResponseProto handleGetCommittedBlockLength( ContainerCommandRequestProto request, KeyValueContainer kvContainer) { if (!request.hasGetCommittedBlockLength()) { - LOG.debug("Malformed Get Key request. trace ID: {}", - request.getTraceID()); + if (LOG.isDebugEnabled()) { + LOG.debug("Malformed Get Key request. trace ID: {}", + request.getTraceID()); + } return ContainerUtils.malformedRequest(request); } @@ -490,8 +506,10 @@ ContainerCommandResponseProto handleDeleteBlock( ContainerCommandRequestProto request, KeyValueContainer kvContainer) { if (!request.hasDeleteBlock()) { - LOG.debug("Malformed Delete Key request. trace ID: {}", - request.getTraceID()); + if (LOG.isDebugEnabled()) { + LOG.debug("Malformed Delete Key request. trace ID: {}", + request.getTraceID()); + } return ContainerUtils.malformedRequest(request); } @@ -521,13 +539,15 @@ ContainerCommandResponseProto handleReadChunk( DispatcherContext dispatcherContext) { if (!request.hasReadChunk()) { - LOG.debug("Malformed Read Chunk request. trace ID: {}", - request.getTraceID()); + if (LOG.isDebugEnabled()) { + LOG.debug("Malformed Read Chunk request. trace ID: {}", + request.getTraceID()); + } return ContainerUtils.malformedRequest(request); } // The container can become unhealthy after the lock is released. - // The operation will likely fail/timeout in that happens. + // The operation will likely fail/timeout if that happens. try { checkContainerIsHealthy(kvContainer); } catch (StorageContainerException sce) { @@ -535,7 +555,7 @@ ContainerCommandResponseProto handleReadChunk( } ChunkInfo chunkInfo; - byte[] data; + ByteBuffer data; try { BlockID blockID = BlockID.getFromProtobuf( request.getReadChunk().getBlockID()); @@ -549,7 +569,7 @@ ContainerCommandResponseProto handleReadChunk( data = chunkManager .readChunk(kvContainer, blockID, chunkInfo, dispatcherContext); - metrics.incContainerBytesStats(Type.ReadChunk, data.length); + metrics.incContainerBytesStats(Type.ReadChunk, chunkInfo.getLen()); } catch (StorageContainerException ex) { return ContainerUtils.logAndReturnError(LOG, ex, request); } catch (IOException ex) { @@ -558,7 +578,18 @@ ContainerCommandResponseProto handleReadChunk( request); } - return ChunkUtils.getReadChunkResponse(request, data, chunkInfo); + Preconditions.checkNotNull(data, "Chunk data is null"); + + ContainerProtos.ReadChunkResponseProto.Builder response = + ContainerProtos.ReadChunkResponseProto.newBuilder(); + response.setChunkData(chunkInfo.getProtoBufMessage()); + response.setData(byteBufferToByteString.apply(data)); + response.setBlockID(request.getReadChunk().getBlockID()); + + ContainerCommandResponseProto.Builder builder = + ContainerUtils.getSuccessResponseBuilder(request); + builder.setReadChunk(response); + return builder.build(); } /** @@ -589,8 +620,10 @@ ContainerCommandResponseProto handleDeleteChunk( ContainerCommandRequestProto request, KeyValueContainer kvContainer) { if (!request.hasDeleteChunk()) { - LOG.debug("Malformed Delete Chunk request. trace ID: {}", - request.getTraceID()); + if (LOG.isDebugEnabled()) { + LOG.debug("Malformed Delete Chunk request. 
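handleReadChunk above now obtains the chunk as a ByteBuffer and converts it through byteBufferToByteString, a Function supplied by ByteStringConversion.createByteBufferConversion(conf), replacing the static ByteStringHelper. A hypothetical sketch of what such a conversion can look like, switching between protobuf's zero-copy wrap and a defensive copy; the boolean flag and the unshaded protobuf imports are assumptions, not taken from the patch:

  import java.nio.ByteBuffer;
  import java.nio.charset.StandardCharsets;
  import java.util.function.Function;
  import com.google.protobuf.ByteString;
  import com.google.protobuf.UnsafeByteOperations;

  public class ByteStringConversionSketch {
    static Function<ByteBuffer, ByteString> conversion(boolean unsafeEnabled) {
      // Zero-copy wrap avoids an extra copy but shares the underlying buffer;
      // copyFrom is safer when the buffer may be reused afterwards.
      return unsafeEnabled
          ? UnsafeByteOperations::unsafeWrap
          : ByteString::copyFrom;
    }

    public static void main(String[] args) {
      ByteBuffer data = ByteBuffer.wrap("chunk-bytes".getBytes(StandardCharsets.UTF_8));
      ByteString payload = conversion(false).apply(data);
      System.out.println(payload.size());
    }
  }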
trace ID: {}", + request.getTraceID()); + } return ContainerUtils.malformedRequest(request); } @@ -632,8 +665,10 @@ ContainerCommandResponseProto handleWriteChunk( DispatcherContext dispatcherContext) { if (!request.hasWriteChunk()) { - LOG.debug("Malformed Write Chunk request. trace ID: {}", - request.getTraceID()); + if (LOG.isDebugEnabled()) { + LOG.debug("Malformed Write Chunk request. trace ID: {}", + request.getTraceID()); + } return ContainerUtils.malformedRequest(request); } @@ -687,8 +722,10 @@ ContainerCommandResponseProto handlePutSmallFile( DispatcherContext dispatcherContext) { if (!request.hasPutSmallFile()) { - LOG.debug("Malformed Put Small File request. trace ID: {}", - request.getTraceID()); + if (LOG.isDebugEnabled()) { + LOG.debug("Malformed Put Small File request. trace ID: {}", + request.getTraceID()); + } return ContainerUtils.malformedRequest(request); } PutSmallFileRequestProto putSmallFileReq = @@ -745,8 +782,10 @@ ContainerCommandResponseProto handleGetSmallFile( ContainerCommandRequestProto request, KeyValueContainer kvContainer) { if (!request.hasGetSmallFile()) { - LOG.debug("Malformed Get Small File request. trace ID: {}", - request.getTraceID()); + if (LOG.isDebugEnabled()) { + LOG.debug("Malformed Get Small File request. trace ID: {}", + request.getTraceID()); + } return ContainerUtils.malformedRequest(request); } @@ -772,9 +811,9 @@ ContainerCommandResponseProto handleGetSmallFile( for (ContainerProtos.ChunkInfo chunk : responseData.getChunks()) { // if the block is committed, all chunks must have been committed. // Tmp chunk files won't exist here. - byte[] data = chunkManager.readChunk(kvContainer, blockID, + ByteBuffer data = chunkManager.readChunk(kvContainer, blockID, ChunkInfo.getFromProtoBuf(chunk), dispatcherContext); - ByteString current = ByteString.copyFrom(data); + ByteString current = byteBufferToByteString.apply(data); dataBuf = dataBuf.concat(current); chunkInfo = chunk; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java index a043cdce2e0f..8ca59b591464 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java @@ -24,9 +24,6 @@ .ContainerCommandRequestProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos .ContainerCommandResponseProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ReadChunkResponseProto; -import org.apache.hadoop.hdds.scm.ByteStringHelper; import org.apache.hadoop.hdds.scm.container.common.helpers .StorageContainerException; import org.apache.hadoop.io.IOUtils; @@ -80,7 +77,7 @@ public static void writeData(File chunkFile, ChunkInfo chunkInfo, ByteBuffer data, VolumeIOStats volumeIOStats, boolean sync) throws StorageContainerException, ExecutionException, InterruptedException, NoSuchAlgorithmException { - int bufferSize = data.capacity(); + final int bufferSize = data.remaining(); Logger log = LoggerFactory.getLogger(ChunkManagerImpl.class); if (bufferSize != chunkInfo.getLen()) { String err = String.format("data array does not match the length " + @@ -127,8 +124,10 @@ public static void writeData(File chunkFile, ChunkInfo chunkInfo, return null; }); - log.debug("Write Chunk completed for chunkFile: {}, size 
{}", chunkFile, - bufferSize); + if (log.isDebugEnabled()) { + log.debug("Write Chunk completed for chunkFile: {}, size {}", chunkFile, + bufferSize); + } } /** @@ -140,8 +139,7 @@ public static void writeData(File chunkFile, ChunkInfo chunkInfo, * @return ByteBuffer */ public static ByteBuffer readData(File chunkFile, ChunkInfo data, - VolumeIOStats volumeIOStats) throws StorageContainerException, - ExecutionException, InterruptedException { + VolumeIOStats volumeIOStats) throws StorageContainerException { Logger log = LoggerFactory.getLogger(ChunkManagerImpl.class); if (!chunkFile.exists()) { @@ -166,6 +164,7 @@ public static ByteBuffer readData(File chunkFile, ChunkInfo data, try (FileLock ignored = file.lock(offset, len, true)) { file.read(buf, offset); + buf.flip(); } // Increment volumeIO stats here. @@ -285,33 +284,6 @@ public static ContainerCommandResponseProto getChunkResponseSuccess( return ContainerUtils.getSuccessResponse(msg); } - /** - * Gets a response to the read chunk calls. - * - * @param msg - Msg - * @param data - Data - * @param info - Info - * @return Response. - */ - public static ContainerCommandResponseProto getReadChunkResponse( - ContainerCommandRequestProto msg, byte[] data, ChunkInfo info) { - Preconditions.checkNotNull(msg); - Preconditions.checkNotNull(data, "Chunk data is null"); - Preconditions.checkNotNull(info, "Chunk Info is null"); - - ReadChunkResponseProto.Builder response = - ReadChunkResponseProto.newBuilder(); - response.setChunkData(info.getProtoBufMessage()); - response.setData( - ByteStringHelper.getByteString(data)); - response.setBlockID(msg.getReadChunk().getBlockID()); - - ContainerCommandResponseProto.Builder builder = - ContainerUtils.getSuccessResponseBuilder(msg); - builder.setReadChunk(response); - return builder.build(); - } - @VisibleForTesting static T processFileExclusively( Path path, CheckedSupplier op diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java index d7731e408db2..4272861c57e2 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java @@ -120,9 +120,11 @@ public long putBlock(Container container, BlockData data) throws IOException { container.updateBlockCommitSequenceId(bcsId); // Increment keycount here container.getContainerData().incrKeyCount(); - LOG.debug( - "Block " + data.getBlockID() + " successfully committed with bcsId " - + bcsId + " chunk size " + data.getChunks().size()); + if (LOG.isDebugEnabled()) { + LOG.debug( + "Block " + data.getBlockID() + " successfully committed with bcsId " + + bcsId + " chunk size " + data.getChunks().size()); + } return data.getSize(); } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDummyImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDummyImpl.java index 9d63c166034f..fa9e205786e0 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDummyImpl.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDummyImpl.java @@ -120,8 +120,8 @@ public void 
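The readData change above adds buf.flip() after the positional read, so the returned buffer is positioned at 0 with its limit set to the bytes read, ready for the new ByteBuffer-based readChunk path. The standard NIO pattern, shown stand-alone; the file name and size are illustrative:

  import java.io.IOException;
  import java.nio.ByteBuffer;
  import java.nio.channels.FileChannel;
  import java.nio.file.Paths;
  import java.nio.file.StandardOpenOption;

  public class PositionalReadExample {
    public static void main(String[] args) throws IOException {
      try (FileChannel channel = FileChannel.open(Paths.get("chunk.data"),
          StandardOpenOption.READ)) {
        ByteBuffer buf = ByteBuffer.allocateDirect(4096);
        channel.read(buf, 0);  // positional read: fills buf, advances its position
        buf.flip();            // limit = bytes read, position = 0, ready to consume
        // buf.remaining() now reports how many bytes were actually read.
      }
    }
  }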
writeChunk(Container container, BlockID blockID, ChunkInfo info, * TODO: Explore if we need to do that for ozone. */ @Override - public byte[] readChunk(Container container, BlockID blockID, ChunkInfo info, - DispatcherContext dispatcherContext) { + public ByteBuffer readChunk(Container container, BlockID blockID, + ChunkInfo info, DispatcherContext dispatcherContext) { long readStartTime = Time.monotonicNow(); @@ -138,7 +138,7 @@ public byte[] readChunk(Container container, BlockID blockID, ChunkInfo info, volumeIOStats.incReadOpCount(); volumeIOStats.incReadBytes(info.getLen()); - return data.array(); + return data; } /** diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerImpl.java index fc079374d413..e22841eec8a4 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerImpl.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerImpl.java @@ -87,10 +87,11 @@ public void writeChunk(Container container, BlockID blockID, ChunkInfo info, boolean isOverwrite = ChunkUtils.validateChunkForOverwrite( chunkFile, info); File tmpChunkFile = getTmpChunkFile(chunkFile, dispatcherContext); - - LOG.debug( - "writing chunk:{} chunk stage:{} chunk file:{} tmp chunk file:{}", - info.getChunkName(), stage, chunkFile, tmpChunkFile); + if (LOG.isDebugEnabled()) { + LOG.debug( + "writing chunk:{} chunk stage:{} chunk file:{} tmp chunk file:{}", + info.getChunkName(), stage, chunkFile, tmpChunkFile); + } switch (stage) { case WRITE_DATA: @@ -194,44 +195,34 @@ protected void updateContainerWriteStats(Container container, ChunkInfo info, * TODO: Right now we do not support partial reads and writes of chunks. * TODO: Explore if we need to do that for ozone. */ - public byte[] readChunk(Container container, BlockID blockID, ChunkInfo info, - DispatcherContext dispatcherContext) throws StorageContainerException { - try { - KeyValueContainerData containerData = (KeyValueContainerData) container - .getContainerData(); - ByteBuffer data; - HddsVolume volume = containerData.getVolume(); - VolumeIOStats volumeIOStats = volume.getVolumeIOStats(); + public ByteBuffer readChunk(Container container, BlockID blockID, + ChunkInfo info, DispatcherContext dispatcherContext) + throws StorageContainerException { + KeyValueContainerData containerData = (KeyValueContainerData) container + .getContainerData(); + ByteBuffer data; + HddsVolume volume = containerData.getVolume(); + VolumeIOStats volumeIOStats = volume.getVolumeIOStats(); - // Checking here, which layout version the container is, and reading - // the chunk file in that format. - // In version1, we verify checksum if it is available and return data - // of the chunk file. - if (containerData.getLayOutVersion() == ChunkLayOutVersion - .getLatestVersion().getVersion()) { - File chunkFile = ChunkUtils.getChunkFile(containerData, info); + // Checking here, which layout version the container is, and reading + // the chunk file in that format. + // In version1, we verify checksum if it is available and return data + // of the chunk file. 
+ if (containerData.getLayOutVersion() == ChunkLayOutVersion + .getLatestVersion().getVersion()) { + File chunkFile = ChunkUtils.getChunkFile(containerData, info); - // In case the chunk file does not exist but tmp chunk file exist, - // read from tmp chunk file if readFromTmpFile is set to true - if (!chunkFile.exists() && dispatcherContext != null - && dispatcherContext.isReadFromTmpFile()) { - chunkFile = getTmpChunkFile(chunkFile, dispatcherContext); - } - data = ChunkUtils.readData(chunkFile, info, volumeIOStats); - containerData.incrReadCount(); - long length = chunkFile.length(); - containerData.incrReadBytes(length); - return data.array(); + // In case the chunk file does not exist but tmp chunk file exist, + // read from tmp chunk file if readFromTmpFile is set to true + if (!chunkFile.exists() && dispatcherContext != null + && dispatcherContext.isReadFromTmpFile()) { + chunkFile = getTmpChunkFile(chunkFile, dispatcherContext); } - } catch (ExecutionException ex) { - LOG.error("read data failed. error: {}", ex); - throw new StorageContainerException("Internal error: ", - ex, CONTAINER_INTERNAL_ERROR); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - LOG.error("read data failed. error: {}", e); - throw new StorageContainerException("Internal error: ", - e, CONTAINER_INTERNAL_ERROR); + data = ChunkUtils.readData(chunkFile, info, volumeIOStats); + containerData.incrReadCount(); + long length = chunkFile.length(); + containerData.incrReadBytes(length); + return data; } return null; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/ChunkManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/ChunkManager.java index 5a6898f558a8..5adb6415ec1c 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/ChunkManager.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/ChunkManager.java @@ -59,7 +59,7 @@ void writeChunk(Container container, BlockID blockID, ChunkInfo info, * TODO: Right now we do not support partial reads and writes of chunks. * TODO: Explore if we need to do that for ozone. 
*/ - byte[] readChunk(Container container, BlockID blockID, ChunkInfo info, + ByteBuffer readChunk(Container container, BlockID blockID, ChunkInfo info, DispatcherContext dispatcherContext) throws StorageContainerException; /** diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java index 626e9238ff9a..bc3f51a54ef5 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java @@ -285,8 +285,10 @@ public BackgroundTaskResult call() throws Exception { File chunkFile = dataDir.toPath() .resolve(chunkInfo.getChunkName()).toFile(); if (FileUtils.deleteQuietly(chunkFile)) { - LOG.debug("block {} chunk {} deleted", blockName, - chunkFile.getAbsolutePath()); + if (LOG.isDebugEnabled()) { + LOG.debug("block {} chunk {} deleted", blockName, + chunkFile.getAbsolutePath()); + } } } succeedBlocks.add(blockName); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java index eb672a76d62c..8bbdec96695e 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java @@ -140,7 +140,6 @@ public void exportContainer(final ContainerType type, * @param containerId Id of the container to be deleted * @param force if this is set to true, we delete container without checking * state of the container. 
- * @throws IOException */ public void deleteContainer(final long containerId, boolean force) throws IOException { @@ -160,7 +159,7 @@ private Handler getHandler(final Container container) { return handlers.get(container.getContainerType()); } - public Iterator getContainers() { + public Iterator> getContainers() { return containerSet.getContainerIterator(); } @@ -171,7 +170,8 @@ public Iterator getContainers() { * @param volume the HDDS volume which should be used to filter containers * @return {@literal Iterator} */ - public Iterator getContainers(HddsVolume volume) { + public Iterator> getContainers(HddsVolume volume) { return containerSet.getContainerIterator(volume); } + } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScanner.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScanner.java index 799c8fed3686..1141951dcc00 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScanner.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScanner.java @@ -22,7 +22,6 @@ import java.util.concurrent.TimeUnit; import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.util.Canceler; import org.apache.hadoop.hdfs.util.DataTransferThrottler; import org.apache.hadoop.ozone.container.common.interfaces.Container; @@ -46,6 +45,7 @@ public class ContainerDataScanner extends Thread { private final DataTransferThrottler throttler; private final Canceler canceler; private final ContainerDataScrubberMetrics metrics; + private final long dataScanInterval; /** * True if the thread is stopping.
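The dataScanInterval field added above drives a simple throttle that the later hunks in this file implement: each data-scan iteration should start no sooner than the configured interval after the previous one began, so an iteration that finishes early is followed by a sleep for the remaining time. A minimal, self-contained sketch of that idea, assuming the interval is supplied in milliseconds (as the toMillis/sleep arithmetic further down suggests) and using placeholder names:

```java
import java.util.concurrent.TimeUnit;

/** Illustrative sketch only; not the scanner class itself. */
final class ScanThrottleSketch {
  // Assumed to be the value resolved from the new "data.scan.interval"
  // setting (default 1m), expressed in milliseconds.
  private final long dataScanIntervalMs;

  ScanThrottleSketch(long dataScanIntervalMs) {
    this.dataScanIntervalMs = dataScanIntervalMs;
  }

  /** Runs one iteration, then sleeps out whatever is left of the interval. */
  void runThrottled(Runnable iteration) throws InterruptedException {
    long startNs = System.nanoTime();
    iteration.run();
    long elapsedMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNs);
    long remainingSleep = dataScanIntervalMs - elapsedMs;
    if (remainingSleep > 0) {
      Thread.sleep(remainingSleep);
    }
  }
}
```

The scanner itself also honours its stopping flag around this logic and only sleeps while it is not being shut down, as the following hunks show.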

@@ -54,22 +54,24 @@ public class ContainerDataScanner extends Thread { private volatile boolean stopping = false; - public ContainerDataScanner(Configuration conf, + public ContainerDataScanner(ContainerScrubberConfiguration conf, ContainerController controller, - HddsVolume volume, long bytesPerSec) { + HddsVolume volume) { this.controller = controller; this.volume = volume; - this.throttler = new HddsDataTransferThrottler(bytesPerSec); - this.canceler = new Canceler(); - this.metrics = ContainerDataScrubberMetrics.create(conf, - volume.toString()); + dataScanInterval = conf.getDataScanInterval(); + throttler = new HddsDataTransferThrottler(conf.getBandwidthPerVolume()); + canceler = new Canceler(); + metrics = ContainerDataScrubberMetrics.create(volume.toString()); setName("ContainerDataScanner(" + volume + ")"); setDaemon(true); } @Override public void run() { - LOG.trace("{}: thread starting.", this); + if (LOG.isTraceEnabled()) { + LOG.trace("{}: thread starting.", this); + } try { while (!stopping) { runIteration(); @@ -89,7 +91,7 @@ public void run() { @VisibleForTesting public void runIteration() { long startTime = System.nanoTime(); - Iterator itr = controller.getContainers(volume); + Iterator> itr = controller.getContainers(volume); while (!stopping && itr.hasNext()) { Container c = itr.next(); if (c.shouldScanData()) { @@ -110,16 +112,26 @@ public void runIteration() { } long totalDuration = System.nanoTime() - startTime; if (!stopping) { - metrics.incNumScanIterations(); - LOG.info("Completed an iteration of container data scrubber in" + - " {} minutes." + - " Number of iterations (since the data-node restart) : {}" + - ", Number of containers scanned in this iteration : {}" + - ", Number of unhealthy containers found in this iteration : {}", - TimeUnit.NANOSECONDS.toMinutes(totalDuration), - metrics.getNumScanIterations(), - metrics.getNumContainersScanned(), - metrics.getNumUnHealthyContainers()); + if (metrics.getNumContainersScanned() > 0) { + metrics.incNumScanIterations(); + LOG.info("Completed an iteration of container data scrubber in" + + " {} minutes." 
+ + " Number of iterations (since the data-node restart) : {}" + + ", Number of containers scanned in this iteration : {}" + + ", Number of unhealthy containers found in this iteration : {}", + TimeUnit.NANOSECONDS.toMinutes(totalDuration), + metrics.getNumScanIterations(), + metrics.getNumContainersScanned(), + metrics.getNumUnHealthyContainers()); + } + long elapsedMillis = TimeUnit.NANOSECONDS.toMillis(totalDuration); + long remainingSleep = dataScanInterval - elapsedMillis; + if (remainingSleep > 0) { + try { + Thread.sleep(remainingSleep); + } catch (InterruptedException ignored) { + } + } } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScrubberMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScrubberMetrics.java index 02e484fbdf53..3cf4f588322a 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScrubberMetrics.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScrubberMetrics.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.container.ozoneimpl; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.metrics2.MetricsSystem; import org.apache.hadoop.metrics2.annotation.Metric; import org.apache.hadoop.metrics2.annotation.Metrics; @@ -103,8 +102,7 @@ private ContainerDataScrubberMetrics(String name, MetricsSystem ms) { this.ms = ms; } - public static ContainerDataScrubberMetrics create(final Configuration conf, - final String volumeName) { + public static ContainerDataScrubberMetrics create(final String volumeName) { MetricsSystem ms = DefaultMetricsSystem.instance(); String name = "ContainerDataScrubberMetrics-"+ (volumeName.isEmpty() ? 
"UndefinedDataNodeVolume"+ ThreadLocalRandom.current().nextInt() diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScanner.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScanner.java index bd400e968c74..46aaf73a12dd 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScanner.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScanner.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.container.ozoneimpl; import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -44,24 +43,22 @@ public class ContainerMetadataScanner extends Thread { */ private boolean stopping = false; - public ContainerMetadataScanner(Configuration conf, - ContainerController controller, - long metadataScanInterval) { + public ContainerMetadataScanner(ContainerScrubberConfiguration conf, + ContainerController controller) { this.controller = controller; - this.metadataScanInterval = metadataScanInterval; - this.metrics = ContainerMetadataScrubberMetrics.create(conf); + this.metadataScanInterval = conf.getMetadataScanInterval(); + this.metrics = ContainerMetadataScrubberMetrics.create(); setName("ContainerMetadataScanner"); setDaemon(true); } @Override public void run() { - /** - * the outer daemon loop exits on down() + /* + * the outer daemon loop exits on shutdown() */ LOG.info("Background ContainerMetadataScanner starting up"); while (!stopping) { - long start = System.nanoTime(); runIteration(); if(!stopping) { metrics.resetNumUnhealthyContainers(); @@ -71,9 +68,9 @@ public void run() { } @VisibleForTesting - public void runIteration() { + void runIteration() { long start = System.nanoTime(); - Iterator containerIt = controller.getContainers(); + Iterator> containerIt = controller.getContainers(); while (!stopping && containerIt.hasNext()) { Container container = containerIt.next(); try { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScrubberMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScrubberMetrics.java index 11629671fb32..3effc351b005 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScrubberMetrics.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScrubberMetrics.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.container.ozoneimpl; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.metrics2.MetricsSystem; import org.apache.hadoop.metrics2.annotation.Metric; import org.apache.hadoop.metrics2.annotation.Metrics; @@ -83,9 +82,9 @@ private ContainerMetadataScrubberMetrics(String name, MetricsSystem ms) { this.ms = ms; } - public static ContainerMetadataScrubberMetrics create(Configuration conf) { + public static ContainerMetadataScrubberMetrics create() { MetricsSystem ms = DefaultMetricsSystem.instance(); - String name = "ContainerDataScrubberMetrics"; + String name = "ContainerMetadataScrubberMetrics"; return 
ms.register(name, null, new ContainerMetadataScrubberMetrics(name, ms)); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerScrubberConfiguration.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerScrubberConfiguration.java index bc830b6efc57..454ce84310aa 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerScrubberConfiguration.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerScrubberConfiguration.java @@ -29,6 +29,7 @@ public class ContainerScrubberConfiguration { private boolean enabled; private long metadataScanInterval; + private long dataScanInterval; private long bandwidthPerVolume; @Config(key = "enabled", @@ -58,6 +59,22 @@ public long getMetadataScanInterval() { return metadataScanInterval; } + @Config(key = "data.scan.interval", + type = ConfigType.TIME, + defaultValue = "1m", + tags = { ConfigTag.STORAGE }, + description = "Minimum time interval between two iterations of container" + + " data scanning. If an iteration takes less time than this, the" + + " scanner will wait before starting the next iteration." + ) + public void setDataScanInterval(long dataScanInterval) { + this.dataScanInterval = dataScanInterval; + } + + public long getDataScanInterval() { + return dataScanInterval; + } + @Config(key = "volume.bytes.per.second", type = ConfigType.LONG, defaultValue = "1048576", diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java index d52cf8c7ab49..a026f0e8757b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java @@ -172,22 +172,18 @@ private void startContainerScrub() { ContainerScrubberConfiguration c = config.getObject( ContainerScrubberConfiguration.class); boolean enabled = c.isEnabled(); - long metadataScanInterval = c.getMetadataScanInterval(); - long bytesPerSec = c.getBandwidthPerVolume(); if (!enabled) { LOG.info("Background container scanner has been disabled."); } else { if (this.metadataScanner == null) { - this.metadataScanner = new ContainerMetadataScanner(config, controller, - metadataScanInterval); + this.metadataScanner = new ContainerMetadataScanner(c, controller); } this.metadataScanner.start(); dataScanners = new ArrayList<>(); for (HddsVolume v : volumeSet.getVolumesList()) { - ContainerDataScanner s = new ContainerDataScanner(config, controller, - v, bytesPerSec); + ContainerDataScanner s = new ContainerDataScanner(c, controller, v); s.start(); dataScanners.add(s); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java index 09d2d8ec34e6..e1e7119727b3 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java @@ -96,12 +96,11 @@ public void testIteratorsAndCount() throws StorageContainerException { 
assertEquals(10, containerSet.containerCount()); - // Using containerIterator. - Iterator containerIterator = containerSet.getContainerIterator(); + Iterator> iterator = containerSet.getContainerIterator(); int count = 0; - while(containerIterator.hasNext()) { - Container kv = containerIterator.next(); + while(iterator.hasNext()) { + Container kv = iterator.next(); ContainerData containerData = kv.getContainerData(); long containerId = containerData.getContainerID(); if (containerId%2 == 0) { @@ -116,7 +115,7 @@ public void testIteratorsAndCount() throws StorageContainerException { assertEquals(10, count); //Using containerMapIterator. - Iterator> containerMapIterator = containerSet + Iterator>> containerMapIterator = containerSet .getContainerMapIterator(); count = 0; @@ -160,26 +159,25 @@ public void testIteratorPerVolume() throws StorageContainerException { containerSet.addContainer(kv); } - Iterator iter1 = containerSet.getContainerIterator(vol1); + Iterator> iter1 = containerSet.getContainerIterator(vol1); int count1 = 0; while (iter1.hasNext()) { Container c = iter1.next(); - assertTrue((c.getContainerData().getContainerID() % 2) == 0); + assertEquals(0, (c.getContainerData().getContainerID() % 2)); count1++; } assertEquals(5, count1); - Iterator iter2 = containerSet.getContainerIterator(vol2); + Iterator> iter2 = containerSet.getContainerIterator(vol2); int count2 = 0; while (iter2.hasNext()) { Container c = iter2.next(); - assertTrue((c.getContainerData().getContainerID() % 2) == 1); + assertEquals(1, (c.getContainerData().getContainerID() % 2)); count2++; } assertEquals(5, count2); } - @Test public void testGetContainerReport() throws IOException { diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java index cf9ea891ebef..84ab56da8645 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java @@ -41,7 +41,6 @@ import java.io.File; import java.nio.ByteBuffer; -import java.util.Arrays; import java.util.UUID; import static java.nio.charset.StandardCharsets.UTF_8; @@ -65,7 +64,7 @@ public class TestChunkManagerImpl { private BlockID blockID; private ChunkManagerImpl chunkManager; private ChunkInfo chunkInfo; - private byte[] data; + private ByteBuffer data; @Rule public TemporaryFolder folder = new TemporaryFolder(); @@ -92,11 +91,11 @@ public void setUp() throws Exception { keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); - data = "testing write chunks".getBytes(UTF_8); + data = ByteBuffer.wrap("testing write chunks".getBytes(UTF_8)); // Creating BlockData blockID = new BlockID(1L, 1L); chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID - .getLocalID(), 0), 0, data.length); + .getLocalID(), 0), 0, data.capacity()); // Create a ChunkManager object. 
chunkManager = new ChunkManagerImpl(true); @@ -118,8 +117,8 @@ public void testWriteChunkStageWriteAndCommit() throws Exception { // As no chunks are written to the volume writeBytes should be 0 checkWriteIOStats(0, 0); - chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, - ByteBuffer.wrap(data), new DispatcherContext.Builder() + chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data, + new DispatcherContext.Builder() .setStage(DispatcherContext.WriteChunkStage.WRITE_DATA).build()); // Now a chunk file is being written with Stage WRITE_DATA, so it should // create a temporary chunk file. @@ -137,13 +136,13 @@ public void testWriteChunkStageWriteAndCommit() throws Exception { // As chunk write stage is WRITE_DATA, temp chunk file will be created. assertTrue(tempChunkFile.exists()); - checkWriteIOStats(data.length, 1); + checkWriteIOStats(data.capacity(), 1); - chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, - ByteBuffer.wrap(data), new DispatcherContext.Builder() + chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data, + new DispatcherContext.Builder() .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA).build()); - checkWriteIOStats(data.length, 1); + checkWriteIOStats(data.capacity(), 1); // Old temp file should have been renamed to chunk file. assertTrue(chunksPath.listFiles().length == 1); @@ -160,8 +159,8 @@ public void testWriteChunkIncorrectLength() throws Exception { long randomLength = 200L; chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID .getLocalID(), 0), 0, randomLength); - chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, - ByteBuffer.wrap(data), getDispatcherContext()); + chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data, + getDispatcherContext()); fail("testWriteChunkIncorrectLength failed"); } catch (StorageContainerException ex) { // As we got an exception, writeBytes should be 0. @@ -181,35 +180,36 @@ public void testWriteChunkStageCombinedData() throws Exception { // Initially chunks folder should be empty. assertTrue(chunksPath.listFiles().length == 0); checkWriteIOStats(0, 0); - chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, - ByteBuffer.wrap(data), getDispatcherContext()); + chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data, + getDispatcherContext()); // Now a chunk file is being written with Stage COMBINED_DATA, so it should // create a chunk file. 
assertTrue(chunksPath.listFiles().length == 1); File chunkFile = ChunkUtils.getChunkFile(keyValueContainerData, chunkInfo); assertTrue(chunkFile.exists()); - checkWriteIOStats(data.length, 1); + checkWriteIOStats(data.capacity(), 1); } @Test public void testReadChunk() throws Exception { checkWriteIOStats(0, 0); - chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, - ByteBuffer.wrap(data), getDispatcherContext()); - checkWriteIOStats(data.length, 1); + chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data, + getDispatcherContext()); + checkWriteIOStats(data.capacity(), 1); checkReadIOStats(0, 0); - byte[] expectedData = chunkManager.readChunk(keyValueContainer, blockID, + ByteBuffer expectedData = chunkManager.readChunk(keyValueContainer, blockID, chunkInfo, getDispatcherContext()); - assertEquals(expectedData.length, data.length); - assertTrue(Arrays.equals(expectedData, data)); - checkReadIOStats(data.length, 1); + assertEquals(expectedData.limit()-expectedData.position(), + chunkInfo.getLen()); + assertTrue(expectedData.rewind().equals(data.rewind())); + checkReadIOStats(expectedData.capacity(), 1); } @Test public void testDeleteChunk() throws Exception { File chunksPath = new File(keyValueContainerData.getChunksPath()); - chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, - ByteBuffer.wrap(data), getDispatcherContext()); + chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data, + getDispatcherContext()); assertTrue(chunksPath.listFiles().length == 1); chunkManager.deleteChunk(keyValueContainer, blockID, chunkInfo); assertTrue(chunksPath.listFiles().length == 0); @@ -218,8 +218,8 @@ public void testDeleteChunk() throws Exception { @Test public void testDeleteChunkUnsupportedRequest() throws Exception { try { - chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, - ByteBuffer.wrap(data), getDispatcherContext()); + chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data, + getDispatcherContext()); long randomLength = 200L; chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID .getLocalID(), 0), 0, randomLength); @@ -235,8 +235,8 @@ public void testDeleteChunkUnsupportedRequest() throws Exception { public void testReadChunkFileNotExists() throws Exception { try { // trying to read a chunk, where chunk file does not exist - byte[] expectedData = chunkManager.readChunk(keyValueContainer, blockID, - chunkInfo, getDispatcherContext()); + ByteBuffer expectedData = chunkManager.readChunk(keyValueContainer, + blockID, chunkInfo, getDispatcherContext()); fail("testReadChunkFileNotExists failed"); } catch (StorageContainerException ex) { GenericTestUtils.assertExceptionContains("Unable to find the chunk " + @@ -249,20 +249,21 @@ public void testReadChunkFileNotExists() throws Exception { public void testWriteAndReadChunkMultipleTimes() throws Exception { for (int i=0; i<100; i++) { chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID - .getLocalID(), i), 0, data.length); - chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, - ByteBuffer.wrap(data), getDispatcherContext()); + .getLocalID(), i), 0, data.capacity()); + chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data, + getDispatcherContext()); + data.rewind(); } - checkWriteIOStats(data.length*100, 100); + checkWriteIOStats(data.capacity()*100, 100); assertTrue(hddsVolume.getVolumeIOStats().getWriteTime() > 0); for (int i=0; i<100; i++) { chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID - .getLocalID(), i), 0, 
data.length); + .getLocalID(), i), 0, data.capacity()); chunkManager.readChunk(keyValueContainer, blockID, chunkInfo, getDispatcherContext()); } - checkReadIOStats(data.length*100, 100); + checkReadIOStats(data.capacity()*100, 100); assertTrue(hddsVolume.getVolumeIOStats().getReadTime() > 0); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java index eeeb364d6d56..fe702fc693a2 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.container.keyvalue; import com.google.common.primitives.Longs; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.client.BlockID; @@ -47,7 +48,6 @@ import org.junit.runner.RunWith; import org.junit.runners.Parameterized; -import java.io.ByteArrayOutputStream; import java.io.File; import java.io.RandomAccessFile; import java.util.Arrays; @@ -63,6 +63,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertFalse; @@ -74,7 +75,6 @@ private final String storeImpl; private KeyValueContainer container; private KeyValueContainerData containerData; - private ChunkManagerImpl chunkManager; private VolumeSet volumeSet; private OzoneConfiguration conf; private File testRoot; @@ -103,7 +103,6 @@ public TestKeyValueContainerCheck(String metadataImpl) { /** * Sanity test, when there are no corruptions induced. - * @throws Exception */ @Test public void testKeyValueContainerCheckNoCorruption() throws Exception { @@ -111,23 +110,19 @@ public void testKeyValueContainerCheckNoCorruption() throws Exception { int deletedBlocks = 1; int normalBlocks = 3; int chunksPerBlock = 4; - boolean valid = false; ContainerScrubberConfiguration c = conf.getObject( ContainerScrubberConfiguration.class); // test Closed Container - createContainerWithBlocks(containerID, normalBlocks, deletedBlocks, 65536, + createContainerWithBlocks(containerID, normalBlocks, deletedBlocks, chunksPerBlock); - File chunksPath = new File(containerData.getChunksPath()); - assertTrue(chunksPath.listFiles().length - == (deletedBlocks + normalBlocks) * chunksPerBlock); KeyValueContainerCheck kvCheck = new KeyValueContainerCheck(containerData.getMetadataPath(), conf, containerID); // first run checks on a Open Container - valid = kvCheck.fastCheck(); + boolean valid = kvCheck.fastCheck(); assertTrue(valid); container.close(); @@ -140,7 +135,6 @@ public void testKeyValueContainerCheckNoCorruption() throws Exception { /** * Sanity test, when there are corruptions induced. 
- * @throws Exception */ @Test public void testKeyValueContainerCheckCorruption() throws Exception { @@ -148,16 +142,12 @@ public void testKeyValueContainerCheckCorruption() throws Exception { int deletedBlocks = 1; int normalBlocks = 3; int chunksPerBlock = 4; - boolean valid = false; ContainerScrubberConfiguration sc = conf.getObject( ContainerScrubberConfiguration.class); // test Closed Container - createContainerWithBlocks(containerID, normalBlocks, deletedBlocks, 65536, + createContainerWithBlocks(containerID, normalBlocks, deletedBlocks, chunksPerBlock); - File chunksPath = new File(containerData.getChunksPath()); - assertTrue(chunksPath.listFiles().length - == (deletedBlocks + normalBlocks) * chunksPerBlock); container.close(); @@ -169,12 +159,12 @@ public void testKeyValueContainerCheckCorruption() throws Exception { File dbFile = KeyValueContainerLocationUtil .getContainerDBFile(metaDir, containerID); containerData.setDbFile(dbFile); - try(ReferenceCountedDB db = + try (ReferenceCountedDB ignored = BlockUtils.getDB(containerData, conf); KeyValueBlockIterator kvIter = new KeyValueBlockIterator(containerID, new File(containerData.getContainerPath()))) { BlockData block = kvIter.nextBlock(); - assertTrue(!block.getChunks().isEmpty()); + assertFalse(block.getChunks().isEmpty()); ContainerProtos.ChunkInfo c = block.getChunks().get(0); File chunkFile = ChunkUtils.getChunkFile(containerData, ChunkInfo.getFromProtoBuf(c)); @@ -188,7 +178,7 @@ public void testKeyValueContainerCheckCorruption() throws Exception { } // metadata check should pass. - valid = kvCheck.fastCheck(); + boolean valid = kvCheck.fastCheck(); assertTrue(valid); // checksum validation should fail. @@ -201,46 +191,46 @@ public void testKeyValueContainerCheckCorruption() throws Exception { * Creates a container with normal and deleted blocks. * First it will insert normal blocks, and then it will insert * deleted blocks. 
- * @param containerId - * @param normalBlocks - * @param deletedBlocks - * @throws Exception */ private void createContainerWithBlocks(long containerId, int normalBlocks, - int deletedBlocks, int chunkLen, int chunksPerBlock) throws Exception { - long chunkCount; + int deletedBlocks, int chunksPerBlock) throws Exception { String strBlock = "block"; String strChunk = "-chunkFile"; - long totalBlks = normalBlocks + deletedBlocks; + long totalBlocks = normalBlocks + deletedBlocks; + int unitLen = 1024; + int chunkLen = 3 * unitLen; + int bytesPerChecksum = 2 * unitLen; Checksum checksum = new Checksum(ContainerProtos.ChecksumType.SHA256, - chunkLen); - byte[] chunkData = generateRandomData(chunkLen); + bytesPerChecksum); + byte[] chunkData = RandomStringUtils.randomAscii(chunkLen).getBytes(); ChecksumData checksumData = checksum.computeChecksum(chunkData); containerData = new KeyValueContainerData(containerId, (long) StorageUnit.BYTES.toBytes( - chunksPerBlock * chunkLen * totalBlks), + chunksPerBlock * chunkLen * totalBlocks), UUID.randomUUID().toString(), UUID.randomUUID().toString()); container = new KeyValueContainer(containerData, conf); container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), UUID.randomUUID().toString()); try (ReferenceCountedDB metadataStore = BlockUtils.getDB(containerData, conf)) { - chunkManager = new ChunkManagerImpl(true); + ChunkManagerImpl chunkManager = new ChunkManagerImpl(true); - assertTrue(containerData.getChunksPath() != null); + assertNotNull(containerData.getChunksPath()); File chunksPath = new File(containerData.getChunksPath()); assertTrue(chunksPath.exists()); // Initially chunks folder should be empty. - assertTrue(chunksPath.listFiles().length == 0); + File[] chunkFilesBefore = chunksPath.listFiles(); + assertNotNull(chunkFilesBefore); + assertEquals(0, chunkFilesBefore.length); List chunkList = new ArrayList<>(); - for (int i = 0; i < (totalBlks); i++) { + for (int i = 0; i < totalBlocks; i++) { BlockID blockID = new BlockID(containerId, i); BlockData blockData = new BlockData(blockID); chunkList.clear(); - for (chunkCount = 0; chunkCount < chunksPerBlock; chunkCount++) { + for (long chunkCount = 0; chunkCount < chunksPerBlock; chunkCount++) { String chunkName = strBlock + i + strChunk + chunkCount; ChunkInfo info = new ChunkInfo(chunkName, 0, chunkLen); info.setChecksumData(checksumData); @@ -269,15 +259,12 @@ private void createContainerWithBlocks(long containerId, int normalBlocks, blockData.getProtoBufMessage().toByteArray()); } } - } - } - private static byte[] generateRandomData(int length) { - assertTrue(length % 2 == 0); - ByteArrayOutputStream os = new ByteArrayOutputStream(length); - for (int i = 0; i < length; i++) { - os.write(i % 10); + File[] chunkFilesAfter = chunksPath.listFiles(); + assertNotNull(chunkFilesAfter); + assertEquals((deletedBlocks + normalBlocks) * chunksPerBlock, + chunkFilesAfter.length); } - return os.toByteArray(); } + } \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScrubberMetrics.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScrubberMetrics.java index fe2c705f90cf..b9b1beabdbd1 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScrubberMetrics.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScrubberMetrics.java @@ -27,7 
+27,8 @@ import org.junit.Test; import org.mockito.Mockito; -import java.util.Iterator; +import java.util.Arrays; +import java.util.Collection; /** * This test verifies the container scrubber metrics functionality. @@ -42,8 +43,7 @@ public void testContainerMetaDataScrubberMetrics() { HddsVolume vol = Mockito.mock(HddsVolume.class); ContainerController cntrl = mockContainerController(vol); - ContainerMetadataScanner mc = new ContainerMetadataScanner(conf, - cntrl, c.getMetadataScanInterval()); + ContainerMetadataScanner mc = new ContainerMetadataScanner(c, cntrl); mc.runIteration(); Assert.assertEquals(1, mc.getMetrics().getNumScanIterations()); @@ -56,11 +56,11 @@ public void testContainerDataScrubberMetrics() { OzoneConfiguration conf = new OzoneConfiguration(); ContainerScrubberConfiguration c = conf.getObject( ContainerScrubberConfiguration.class); + c.setDataScanInterval(0); HddsVolume vol = Mockito.mock(HddsVolume.class); ContainerController cntrl = mockContainerController(vol); - ContainerDataScanner sc = new ContainerDataScanner(conf, cntrl, - vol, c.getBandwidthPerVolume()); + ContainerDataScanner sc = new ContainerDataScanner(c, cntrl, vol); sc.runIteration(); ContainerDataScrubberMetrics m = sc.getMetrics(); @@ -71,7 +71,7 @@ public void testContainerDataScrubberMetrics() { private ContainerController mockContainerController(HddsVolume vol) { // healthy container - Container c1 = Mockito.mock(Container.class); + Container c1 = Mockito.mock(Container.class); Mockito.when(c1.shouldScanData()).thenReturn(true); Mockito.when(c1.scanMetaData()).thenReturn(true); Mockito.when(c1.scanData( @@ -81,7 +81,7 @@ private ContainerController mockContainerController(HddsVolume vol) { // unhealthy container (corrupt data) ContainerData c2d = Mockito.mock(ContainerData.class); Mockito.when(c2d.getContainerID()).thenReturn(101L); - Container c2 = Mockito.mock(Container.class); + Container c2 = Mockito.mock(Container.class); Mockito.when(c2.scanMetaData()).thenReturn(true); Mockito.when(c2.shouldScanData()).thenReturn(true); Mockito.when(c2.scanData( @@ -92,20 +92,17 @@ private ContainerController mockContainerController(HddsVolume vol) { // unhealthy container (corrupt metadata) ContainerData c3d = Mockito.mock(ContainerData.class); Mockito.when(c3d.getContainerID()).thenReturn(102L); - Container c3 = Mockito.mock(Container.class); + Container c3 = Mockito.mock(Container.class); Mockito.when(c3.shouldScanData()).thenReturn(false); Mockito.when(c3.scanMetaData()).thenReturn(false); Mockito.when(c3.getContainerData()).thenReturn(c3d); - Iterator iter = Mockito.mock(Iterator.class); - Mockito.when(iter.hasNext()).thenReturn(true, true, true, false); - Mockito.when(iter.next()).thenReturn(c1, c2, c3); - + Collection> containers = Arrays.asList(c1, c2, c3); ContainerController cntrl = Mockito.mock(ContainerController.class); Mockito.when(cntrl.getContainers(vol)) - .thenReturn(iter); + .thenReturn(containers.iterator()); Mockito.when(cntrl.getContainers()) - .thenReturn(iter); + .thenReturn(containers.iterator()); return cntrl; } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index b0d3a0f3b7b5..2d679a1cb45d 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -41,12 +41,15 @@ import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; +import org.apache.hadoop.test.LambdaTestUtils; import org.junit.After; import org.junit.Before; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; import org.mockito.Mockito; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.util.Random; import java.util.UUID; @@ -62,6 +65,9 @@ */ public class TestOzoneContainer { + private static final Logger LOG = + LoggerFactory.getLogger(TestOzoneContainer.class); + @Rule public TemporaryFolder folder = new TemporaryFolder(); @@ -148,7 +154,6 @@ public void testBuildContainerMap() throws Exception { @Test public void testContainerCreateDiskFull() throws Exception { long containerSize = (long) StorageUnit.MB.toBytes(100); - boolean diskSpaceException = false; // Format the volumes for (HddsVolume volume : volumeSet.getVolumesList()) { @@ -164,16 +169,14 @@ public void testContainerCreateDiskFull() throws Exception { keyValueContainer = new KeyValueContainer(keyValueContainerData, conf); // we expect an out of space Exception - try { - keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); - } catch (StorageContainerException e) { - if (e.getResult() == DISK_OUT_OF_SPACE) { - diskSpaceException = true; - } + StorageContainerException e = LambdaTestUtils.intercept( + StorageContainerException.class, + () -> keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId) + ); + if (!DISK_OUT_OF_SPACE.equals(e.getResult())) { + LOG.info("Unexpected error during container creation", e); } - - // Test failed if there was no exception - assertEquals(true, diskSpaceException); + assertEquals(DISK_OUT_OF_SPACE, e.getResult()); } //verify committed space on each volume diff --git a/hadoop-hdds/docs/content/beyond/Containers.md b/hadoop-hdds/docs/content/beyond/Containers.md index ea7e3b17c437..154d94dd2477 100644 --- a/hadoop-hdds/docs/content/beyond/Containers.md +++ b/hadoop-hdds/docs/content/beyond/Containers.md @@ -155,7 +155,7 @@ To test a development build you can create your own image and upload it to your ```bash -mvn clean install -f pom.ozone.xml -DskipTests -Pdocker-build,docker-push -Ddocker.image=myregistry:9000/name/ozone +mvn clean install -DskipTests -Pdocker-build,docker-push -Ddocker.image=myregistry:9000/name/ozone ``` The configured image will be used in all the generated kubernetes resources files (`image:` keys are adjusted during the build) diff --git a/hadoop-hdds/docs/content/start/FromSource.md b/hadoop-hdds/docs/content/start/FromSource.md index 1e920d97cfc1..9ce0cc4b6a8f 100644 --- a/hadoop-hdds/docs/content/start/FromSource.md +++ b/hadoop-hdds/docs/content/start/FromSource.md @@ -38,7 +38,7 @@ dependencies to build Hadoop on your build machine. If you need instructions on how to build Hadoop, please look at the Apache Hadoop Website. ```bash -mvn -f pom.ozone.xml clean package -DskipTests=true +mvn clean package -DskipTests=true ``` This will build an ozone-\.tar.gz in your `hadoop-ozone/dist/target` directory. 
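The EventQueue hunk that follows fixes a guard whose level did not match its log call (an isTraceEnabled() check around LOG.debug()); the same guard-before-log pattern is applied throughout the rest of this patch so that log arguments are only built when the corresponding level is enabled. A minimal illustration with placeholder class and message names, not code taken from the patch itself:

```java
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/** Placeholder class showing the guarded-logging pattern used in this patch. */
final class GuardedLoggingExample {
  private static final Logger LOG =
      LoggerFactory.getLogger(GuardedLoggingExample.class);

  void onCommit(long blockId, long bcsId) {
    // Guard level matches the log level: isDebugEnabled() for LOG.debug().
    // This skips argument boxing and message construction when debug is off.
    if (LOG.isDebugEnabled()) {
      LOG.debug("Block {} committed with bcsId {}", blockId, bcsId);
    }
  }
}
```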
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java index 91e0153e357c..cd09da666501 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java @@ -161,7 +161,7 @@ public > void fireEvent( for (EventHandler handler : executorAndHandlers.getValue()) { queuedCount.incrementAndGet(); - if (LOG.isTraceEnabled()) { + if (LOG.isDebugEnabled()) { LOG.debug( "Delivering event {} to executor/handler {}: {}", event.getName(), diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml index a17433732d4a..e38d107249c5 100644 --- a/hadoop-hdds/pom.xml +++ b/hadoop-hdds/pom.xml @@ -21,7 +21,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-main-ozone 0.5.0-SNAPSHOT - ../pom.ozone.xml hadoop-hdds diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java index a0a722277adf..4c182c355b90 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java @@ -153,7 +153,9 @@ public void stop() throws IOException { public AllocatedBlock allocateBlock(final long size, ReplicationType type, ReplicationFactor factor, String owner, ExcludeList excludeList) throws IOException { - LOG.trace("Size;{} , type : {}, factor : {} ", size, type, factor); + if (LOG.isTraceEnabled()) { + LOG.trace("Size;{} , type : {}, factor : {} ", size, type, factor); + } ScmUtils.preCheck(ScmOps.allocateBlock, safeModePrecheck); if (size < 0 || size > containerSize) { LOG.warn("Invalid block size requested : {}", size); @@ -241,8 +243,10 @@ private AllocatedBlock newBlock(ContainerInfo containerInfo) { AllocatedBlock.Builder abb = new AllocatedBlock.Builder() .setContainerBlockID(new ContainerBlockID(containerID, localID)) .setPipeline(pipeline); - LOG.trace("New block allocated : {} Container ID: {}", localID, - containerID); + if (LOG.isTraceEnabled()) { + LOG.trace("New block allocated : {} Container ID: {}", localID, + containerID); + } pipelineManager.incNumBlocksAllocatedMetric(pipeline.getId()); return abb.build(); } catch (PipelineNotFoundException ex) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java index b5e5d16b3f4b..22266b6ce7aa 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java @@ -171,11 +171,13 @@ public EmptyTaskResult call() throws Exception { // offline for sometime, the cached commands be flooded. 
eventPublisher.fireEvent(SCMEvents.RETRIABLE_DATANODE_COMMAND, new CommandForDatanode<>(dnId, new DeleteBlocksCommand(dnTXs))); - LOG.debug( - "Added delete block command for datanode {} in the queue," - + " number of delete block transactions: {}, TxID list: {}", - dnId, dnTXs.size(), String.join(",", - transactions.getTransactionIDList(dnId))); + if (LOG.isDebugEnabled()) { + LOG.debug( + "Added delete block command for datanode {} in the queue," + + " number of delete block transactions: {}, TxID list: {}", + dnId, dnTXs.size(), String.join(",", + transactions.getTransactionIDList(dnId))); + } } } containerManager.updateDeleteTransactionId(transactionMap); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/CommandStatusReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/CommandStatusReportHandler.java index d1479f7c2252..e90986508934 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/CommandStatusReportHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/CommandStatusReportHandler.java @@ -48,13 +48,17 @@ public void onMessage(CommandStatusReportFromDatanode report, Preconditions.checkNotNull(report); List cmdStatusList = report.getReport().getCmdStatusList(); Preconditions.checkNotNull(cmdStatusList); - LOGGER.trace("Processing command status report for dn: {}", report - .getDatanodeDetails()); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("Processing command status report for dn: {}", report + .getDatanodeDetails()); + } // Route command status to its watchers. cmdStatusList.forEach(cmdStatus -> { - LOGGER.trace("Emitting command status for id:{} type: {}", cmdStatus - .getCmdId(), cmdStatus.getType()); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("Emitting command status for id:{} type: {}", cmdStatus + .getCmdId(), cmdStatus.getType()); + } if (cmdStatus.getType() == SCMCommandProto.Type.deleteBlocksCommand) { if (cmdStatus.getStatus() == CommandStatus.Status.EXECUTED) { publisher.fireEvent(SCMEvents.DELETE_BLOCK_STATUS, diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java index 1665a773a47c..59be36b0d2b9 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java @@ -76,8 +76,10 @@ void processContainerReplica(final DatanodeDetails datanodeDetails, .setSequenceId(replicaProto.getBlockCommitSequenceId()) .build(); - logger.debug("Processing replica of container {} from datanode {}", - containerId, datanodeDetails); + if (logger.isDebugEnabled()) { + logger.debug("Processing replica of container {} from datanode {}", + containerId, datanodeDetails); + } // Synchronized block should be replaced by container lock, // once we have introduced lock inside ContainerInfo. 
synchronized (containerManager.getContainer(containerId)) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerActionsHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerActionsHandler.java index ce399eb89b8b..e79f268974cf 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerActionsHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerActionsHandler.java @@ -48,8 +48,10 @@ public void onMessage( ContainerID containerId = ContainerID.valueof(action.getContainerID()); switch (action.getAction()) { case CLOSE: - LOG.debug("Closing container {} in datanode {} because the" + - " container is {}.", containerId, dd, action.getReason()); + if (LOG.isDebugEnabled()) { + LOG.debug("Closing container {} in datanode {} because the" + + " container is {}.", containerId, dd, action.getReason()); + } publisher.fireEvent(SCMEvents.CLOSE_CONTAINER, containerId); break; default: diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java index cd3f42321389..7dde8d75f94d 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java @@ -305,7 +305,9 @@ ContainerInfo allocateContainer( pipelineManager.addContainerToPipeline(pipeline.getId(), ContainerID.valueof(containerID)); containerStateCount.incrementAndGet(containerInfo.getState()); - LOG.trace("New container allocated: {}", containerInfo); + if (LOG.isTraceEnabled()) { + LOG.trace("New container allocated: {}", containerInfo); + } return containerInfo; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java index a8f573025cb8..b58100066a3d 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java @@ -54,8 +54,10 @@ public IncrementalContainerReportHandler( @Override public void onMessage(final IncrementalContainerReportFromDatanode report, final EventPublisher publisher) { - LOG.debug("Processing incremental container report from data node {}", - report.getDatanodeDetails().getUuid()); + if (LOG.isDebugEnabled()) { + LOG.debug("Processing incremental container report from data node {}", + report.getDatanodeDetails().getUuid()); + } boolean success = true; for (ContainerReplicaProto replicaProto : diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java index 8eccf451c989..6d49459b739f 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java @@ -272,8 +272,10 @@ 
private Node chooseNode(List excludedNodes, Node affinityNode, " excludedNodes and affinityNode constrains.", null); } if (hasEnoughSpace((DatanodeDetails)node, sizeRequired)) { - LOG.debug("Datanode {} is chosen for container. Required size is {}", - node.toString(), sizeRequired); + if (LOG.isDebugEnabled()) { + LOG.debug("Datanode {} is chosen for container. Required size is {}", + node.toString(), sizeRequired); + } metrics.incrDatanodeChooseSuccessCount(); if (isFallbacked) { metrics.incrDatanodeChooseFallbackCount(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java index 288fa2deb14e..af44a8a043e5 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java @@ -167,7 +167,9 @@ public void clearSet(T key) { if (attributeMap.containsKey(key)) { attributeMap.get(key).clear(); } else { - LOG.debug("key: {} does not exist in the attributeMap", key); + if (LOG.isDebugEnabled()) { + LOG.debug("key: {} does not exist in the attributeMap", key); + } } } @@ -183,13 +185,17 @@ public boolean remove(T key, ContainerID value) { if (attributeMap.containsKey(key)) { if (!attributeMap.get(key).remove(value)) { - LOG.debug("ContainerID: {} does not exist in the set pointed by " + - "key:{}", value, key); + if (LOG.isDebugEnabled()) { + LOG.debug("ContainerID: {} does not exist in the set pointed by " + + "key:{}", value, key); + } return false; } return true; } else { - LOG.debug("key: {} does not exist in the attributeMap", key); + if (LOG.isDebugEnabled()) { + LOG.debug("key: {} does not exist in the attributeMap", key); + } return false; } } @@ -206,7 +212,9 @@ public NavigableSet getCollection(T key) { if (this.attributeMap.containsKey(key)) { return Collections.unmodifiableNavigableSet(this.attributeMap.get(key)); } - LOG.debug("No such Key. Key {}", key); + if (LOG.isDebugEnabled()) { + LOG.debug("No such Key. Key {}", key); + } return EMPTY_SET; } @@ -237,7 +245,9 @@ public void update(T currentKey, T newKey, ContainerID value) LOG.error("error in update.", ex); if (removed) { insert(currentKey, value); - LOG.trace("reinserted the removed key. {}", currentKey); + if (LOG.isTraceEnabled()) { + LOG.trace("reinserted the removed key. {}", currentKey); + } } throw ex; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java index d85028bf1613..5fc94008ff3b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java @@ -308,8 +308,10 @@ public void updateState(ContainerID containerID, LifeCycleState currentState, // be in an inconsistent state, lifeCycleStateMap.update(currentState, newState, containerID); - LOG.trace("Updated the container {} to new state. Old = {}, new = " + - "{}", containerID, currentState, newState); + if (LOG.isTraceEnabled()) { + LOG.trace("Updated the container {} to new state. 
Old = {}, new = " + + "{}", containerID, currentState, newState); + } // Just flush both old and new data sets from the result cache. flushCache(currentInfo); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/InvalidHostStringException.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/InvalidHostStringException.java new file mode 100644 index 000000000000..c4046c1456e9 --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/InvalidHostStringException.java @@ -0,0 +1,34 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.hdds.scm.node; + +import java.io.IOException; + +/** + * Exception thrown by the NodeDecommissionManager when it encounters + * host strings it does not expect or understand. + */ + +public class InvalidHostStringException extends IOException { + public InvalidHostStringException(String msg) { + super(msg); + } + + public InvalidHostStringException(String msg, Exception e) { + super(msg, e); + } +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java new file mode 100644 index 000000000000..60813ddd4b71 --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java @@ -0,0 +1,286 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.hdds.scm.node; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState; +import org.apache.hadoop.hdds.scm.container.ContainerManager; +import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; +import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeAdminManager; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.net.InetAddress; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.UnknownHostException; +import java.util.LinkedList; +import java.util.List; + +/** + * Class used to manage datanodes scheduled for maintenance or decommission. + */ +public class NodeDecommissionManager { + + private NodeManager nodeManager; + private PipelineManager pipeLineManager; + private ContainerManager containerManager; + private OzoneConfiguration conf; + private boolean useHostnames; + + private List pendingNodes = new LinkedList<>(); + + private static final Logger LOG = + LoggerFactory.getLogger(DatanodeAdminManager.class); + + + static class HostDefinition { + private String rawHostname; + private String hostname; + private int port; + + HostDefinition(String hostname) throws InvalidHostStringException { + this.rawHostname = hostname; + parseHostname(); + } + + public String getRawHostname() { + return rawHostname; + } + + public String getHostname() { + return hostname; + } + + public int getPort() { + return port; + } + + private void parseHostname() throws InvalidHostStringException{ + try { + // A URI *must* have a scheme, so just create a fake one + URI uri = new URI("my://"+rawHostname.trim()); + this.hostname = uri.getHost(); + this.port = uri.getPort(); + + if (this.hostname == null) { + throw new InvalidHostStringException("The string "+rawHostname+ + " does not contain a value hostname or hostname:port definition"); + } + } catch (URISyntaxException e) { + throw new InvalidHostStringException( + "Unable to parse the hoststring "+rawHostname, e); + } + } + } + + private List mapHostnamesToDatanodes(List hosts) + throws InvalidHostStringException { + List results = new LinkedList<>(); + for (String hostString : hosts) { + HostDefinition host = new HostDefinition(hostString); + InetAddress addr; + try { + addr = InetAddress.getByName(host.getHostname()); + } catch (UnknownHostException e) { + throw new InvalidHostStringException("Unable to resolve the host " + +host.getRawHostname(), e); + } + String dnsName; + if (useHostnames) { + dnsName = addr.getHostName(); + } else { + dnsName = addr.getHostAddress(); + } + List found = nodeManager.getNodesByAddress(dnsName); + if (found.size() == 0) { + throw new InvalidHostStringException("The string " + + host.getRawHostname()+" resolved to "+dnsName + + " is not found in SCM"); + } else if (found.size() == 1) { + if (host.getPort() != -1 && + !validateDNPortMatch(host.getPort(), found.get(0))) { + throw new InvalidHostStringException("The string "+ + host.getRawHostname()+" matched a single 
datanode, but the "+ + "given port is not used by that Datanode"); + } + results.add(found.get(0)); + } else if (found.size() > 1) { + DatanodeDetails match = null; + for(DatanodeDetails dn : found) { + if (validateDNPortMatch(host.getPort(), dn)) { + match = dn; + break; + } + } + if (match == null) { + throw new InvalidHostStringException("The string " + + host.getRawHostname()+ "matched multiple Datanodes, but no "+ + "datanode port matched the given port"); + } + results.add(match); + } + } + return results; + } + + /** + * Check if the passed port is used by the given DatanodeDetails object. If + * it is, return true, otherwise return false. + * @param port Port number to check if it is used by the datanode + * @param dn Datanode to check if it is using the given port + * @return True if port is used by the datanode. False otherwise. + */ + private boolean validateDNPortMatch(int port, DatanodeDetails dn) { + for (DatanodeDetails.Port p : dn.getPorts()) { + if (p.getValue() == port) { + return true; + } + } + return false; + } + + public NodeDecommissionManager(OzoneConfiguration conf, + NodeManager nodeManager, PipelineManager pipelineManager, + ContainerManager containerManager) { + this.conf = conf; + this.nodeManager = nodeManager; + this.pipeLineManager = pipelineManager; + this.containerManager = containerManager; + + useHostnames = conf.getBoolean( + DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME, + DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME_DEFAULT); + } + + public synchronized void decommissionNodes(List nodes) + throws InvalidHostStringException { + List dns = mapHostnamesToDatanodes(nodes); + for (DatanodeDetails dn : dns) { + try { + startDecommission(dn); + } catch (NodeNotFoundException e) { + // We already validated the host strings and retrieved the DnDetails + // object from the node manager. Therefore we should never get a + // NodeNotFoundException here expect if the node is remove in the + // very short window between validation and starting decom. Therefore + // log a warning and ignore the exception + LOG.warn("The host {} was not found in SCM. Ignoring the request to "+ + "decommission it", dn.getHostName()); + } + } + } + + public synchronized void startDecommission(DatanodeDetails dn) + throws NodeNotFoundException { + NodeStatus nodeStatus = getNodeStatus(dn); + NodeOperationalState opState = nodeStatus.getOperationalState(); + LOG.info("In decommission the op state is {}", opState); + if (opState != NodeOperationalState.DECOMMISSIONING + && opState != NodeOperationalState.DECOMMISSIONED) { + LOG.info("Starting Decommission for node {}", dn); + nodeManager.setNodeOperationalState( + dn, NodeOperationalState.DECOMMISSIONING); + pendingNodes.add(dn); + } else { + LOG.info("Start Decommission called on node {} in state {}. Nothing to "+ + "do.", dn, opState); + } + } + + public synchronized void recommissionNodes(List nodes) + throws InvalidHostStringException { + List dns = mapHostnamesToDatanodes(nodes); + for (DatanodeDetails dn : dns) { + try { + recommission(dn); + } catch (NodeNotFoundException e) { + // We already validated the host strings and retrieved the DnDetails + // object from the node manager. Therefore we should never get a + // NodeNotFoundException here expect if the node is remove in the + // very short window between validation and starting decom. Therefore + // log a warning and ignore the exception + LOG.warn("The host {} was not found in SCM. 
Ignoring the request to "+ + "recommission it", dn.getHostName()); + } + } + } + + public synchronized void recommission(DatanodeDetails dn) + throws NodeNotFoundException{ + NodeStatus nodeStatus = getNodeStatus(dn); + NodeOperationalState opState = nodeStatus.getOperationalState(); + if (opState != NodeOperationalState.IN_SERVICE) { + nodeManager.setNodeOperationalState( + dn, NodeOperationalState.IN_SERVICE); + pendingNodes.remove(dn); + LOG.info("Recommissioned node {}", dn); + } else { + LOG.info("Recommission called on node {} with state {}. "+ + "Nothing to do.", dn, opState); + } + } + + public synchronized void startMaintenanceNodes(List nodes, int endInHours) + throws InvalidHostStringException { + List dns = mapHostnamesToDatanodes(nodes); + for (DatanodeDetails dn : dns) { + try { + startMaintenance(dn, endInHours); + } catch (NodeNotFoundException e) { + // We already validated the host strings and retrieved the DnDetails + // object from the node manager. Therefore we should never get a + // NodeNotFoundException here expect if the node is remove in the + // very short window between validation and starting decom. Therefore + // log a warning and ignore the exception + LOG.warn("The host {} was not found in SCM. Ignoring the request to "+ + "start maintenance on it", dn.getHostName()); + } + } + } + + // TODO - If startMaintenance is called on a host already in maintenance, + // then we should update the end time? + public synchronized void startMaintenance(DatanodeDetails dn, int endInHours) + throws NodeNotFoundException { + NodeStatus nodeStatus = getNodeStatus(dn); + NodeOperationalState opState = nodeStatus.getOperationalState(); + if (opState != NodeOperationalState.ENTERING_MAINTENANCE && + opState != NodeOperationalState.IN_MAINTENANCE) { + nodeManager.setNodeOperationalState( + dn, NodeOperationalState.ENTERING_MAINTENANCE); + pendingNodes.add(dn); + LOG.info("Starting Maintenance for node {}", dn); + } else { + LOG.info("Starting Maintenance called on node {} with state {}. "+ + "Nothing to do.", dn, opState); + } + } + + private NodeStatus getNodeStatus(DatanodeDetails dn) + throws NodeNotFoundException { + NodeStatus nodeStatus = nodeManager.getNodeStatus(dn); + if (nodeStatus == null) { + throw new NodeNotFoundException(); + } + return nodeStatus; + } + +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java index 205f2e1b22b2..252c38fe1c25 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java @@ -125,11 +125,19 @@ int getNodeCount( SCMNodeMetric getNodeStat(DatanodeDetails datanodeDetails); /** - * Returns the node state of a specific node. + * Returns the node status of a specific node. * @param datanodeDetails DatanodeDetails - * @return Healthy/Stale/Dead. + * @return NodeStatus for the node */ - NodeState getNodeState(DatanodeDetails datanodeDetails); + NodeStatus getNodeStatus(DatanodeDetails datanodeDetails); + + /** + * Set the operation state of a node. + * @param datanodeDetails The datanode to set the new state for + * @param newState The new operational state for the node + */ + void setNodeOperationalState(DatanodeDetails datanodeDetails, + NodeOperationalState newState) throws NodeNotFoundException; /** * Get set of pipelines a datanode is part of. 
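Note on the host-string handling in NodeDecommissionManager.HostDefinition above: it relies on java.net.URI with a placeholder scheme to split "host" or "host:port" input. The snippet below is a minimal standalone sketch of that approach for illustration only; the class name and sample host strings are hypothetical and not part of the patch.

    import java.net.URI;
    import java.net.URISyntaxException;

    /** Illustrative sketch: splitting "host" or "host:port" strings with java.net.URI. */
    public final class HostPortParseSketch {
      public static void main(String[] args) throws URISyntaxException {
        for (String raw : new String[]{"foobar", "foobar:1234", " foobar.mycompany.com:1234 "}) {
          // A URI must have a scheme, so prepend a placeholder one before parsing,
          // which is the same trick the patch uses.
          URI uri = new URI("my://" + raw.trim());
          String host = uri.getHost(); // null when the input is not a valid host[:port]
          int port = uri.getPort();    // -1 when no port was given
          System.out.println(raw.trim() + " -> host=" + host + ", port=" + port);
        }
      }
    }

As in the patch, a null host from getHost() marks the input as an invalid host string and the request is rejected.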
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java index 1e1a50cd3db0..4177b63dd396 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java @@ -394,6 +394,21 @@ public List getAllNodes() { return nodeStateMap.getAllDatanodeInfos(); } + /** + * Sets the operational state of the given node. Intended to be called when + * a node is being decommissioned etc. + * + * @param dn The datanode having its state set + * @param newState The new operational State of the node. + */ + public void setNodeOperationalState(DatanodeDetails dn, + NodeOperationalState newState) throws NodeNotFoundException { + DatanodeInfo dni = nodeStateMap.getNodeInfo(dn.getUuid()); + if (dni.getNodeStatus().getOperationalState() != newState) { + nodeStateMap.updateNodeOperationalState(dn.getUuid(), newState); + } + } + /** * Gets set of pipelineID a datanode belongs to. * @param dnId - Datanode ID diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java index e48eda1bb59e..407f026ad9ea 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java @@ -213,21 +213,32 @@ public int getNodeCount(NodeOperationalState nodeOpState, NodeState health) { } /** - * Returns the node state of a specific node. + * Returns the node status of a specific node. * * @param datanodeDetails Datanode Details - * @return Healthy/Stale/Dead/Unknown. + * @return NodeStatus for the node */ @Override - public NodeState getNodeState(DatanodeDetails datanodeDetails) { + public NodeStatus getNodeStatus(DatanodeDetails datanodeDetails) { try { - return nodeStateManager.getNodeStatus(datanodeDetails).getHealth(); + return nodeStateManager.getNodeStatus(datanodeDetails); } catch (NodeNotFoundException e) { // TODO: should we throw NodeNotFoundException? return null; } } + /** + * Set the operation state of a node. + * @param datanodeDetails The datanode to set the new state for + * @param newState The new operational state for the node + */ + @Override + public void setNodeOperationalState(DatanodeDetails datanodeDetails, + NodeOperationalState newState) throws NodeNotFoundException{ + nodeStateManager.setNodeOperationalState(datanodeDetails, newState); + } + /** * Closes this stream and releases any system resources associated with it. If * the stream is already closed then invoking this method has no effect. @@ -301,8 +312,10 @@ public RegisteredCommand register( processNodeReport(datanodeDetails, nodeReport); LOG.info("Registered Data node : {}", datanodeDetails); } catch (NodeAlreadyExistsException e) { - LOG.trace("Datanode is already registered. Datanode: {}", - datanodeDetails.toString()); + if (LOG.isTraceEnabled()) { + LOG.trace("Datanode is already registered. 
Datanode: {}", + datanodeDetails.toString()); + } } return RegisteredCommand.newBuilder().setErrorCode(ErrorCode.success) @@ -677,7 +690,9 @@ private String nodeResolve(String hostname) { List resolvedHosts = dnsToSwitchMapping.resolve(hosts); if (resolvedHosts != null && !resolvedHosts.isEmpty()) { String location = resolvedHosts.get(0); - LOG.debug("Resolve datanode {} return location {}", hostname, location); + if (LOG.isDebugEnabled()) { + LOG.debug("Resolve datanode {} return location {}", hostname, location); + } return location; } else { LOG.error("Node {} Resolution failed. Please make sure that DNS table " + diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java index 2b11da9e92f6..793f4e2a5e27 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java @@ -74,7 +74,9 @@ public void onMessage(PipelineReportFromDatanode pipelineReportFromDatanode, pipelineReportFromDatanode.getReport(); Preconditions.checkNotNull(dn, "Pipeline Report is " + "missing DatanodeDetails."); - LOGGER.trace("Processing pipeline report for dn: {}", dn); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("Processing pipeline report for dn: {}", dn); + } for (PipelineReport report : pipelineReport.getPipelineReportList()) { try { processPipelineReport(report, dn); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java index 1cebef629ba6..3a4f826ffbab 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java @@ -190,7 +190,9 @@ public void shutdown() { protected void initializePipeline(Pipeline pipeline) throws IOException { final RaftGroup group = RatisHelper.newRaftGroup(pipeline); - LOG.debug("creating pipeline:{} with {}", pipeline.getId(), group); + if (LOG.isDebugEnabled()) { + LOG.debug("creating pipeline:{} with {}", pipeline.getId(), group); + } callRatisRpc(pipeline.getNodes(), (raftClient, peer) -> { RaftClientReply reply = raftClient.groupAdd(group, peer.getId()); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java index 777a0b05aabd..20fa092b2d00 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java @@ -59,7 +59,9 @@ private RatisPipelineUtils() { static void destroyPipeline(Pipeline pipeline, Configuration ozoneConf, GrpcTlsConfig grpcTlsConfig) { final RaftGroup group = RatisHelper.newRaftGroup(pipeline); - LOG.debug("destroying pipeline:{} with {}", pipeline.getId(), group); + if (LOG.isDebugEnabled()) { + LOG.debug("destroying pipeline:{} with {}", pipeline.getId(), group); + } for (DatanodeDetails dn : pipeline.getNodes()) { try { destroyPipeline(dn, pipeline.getId(), ozoneConf, grpcTlsConfig); diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java index 0d2f47000038..53bd95d55a4d 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java @@ -57,6 +57,18 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartReplicationManagerResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StopReplicationManagerRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StopReplicationManagerResponseProto; +import org.apache.hadoop.hdds.protocol.proto. + StorageContainerLocationProtocolProtos.DecommissionNodesRequestProto; +import org.apache.hadoop.hdds.protocol.proto. + StorageContainerLocationProtocolProtos.DecommissionNodesResponseProto; +import org.apache.hadoop.hdds.protocol.proto. + StorageContainerLocationProtocolProtos.RecommissionNodesRequestProto; +import org.apache.hadoop.hdds.protocol.proto. + StorageContainerLocationProtocolProtos.RecommissionNodesResponseProto; +import org.apache.hadoop.hdds.protocol.proto. + StorageContainerLocationProtocolProtos.StartMaintenanceNodesRequestProto; +import org.apache.hadoop.hdds.protocol.proto. + StorageContainerLocationProtocolProtos.StartMaintenanceNodesResponseProto; import org.apache.hadoop.hdds.scm.ScmInfo; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; @@ -214,6 +226,27 @@ public ScmContainerLocationResponse processRequest( .setReplicationManagerStatusResponse(getReplicationManagerStatus( request.getSeplicationManagerStatusRequest())) .build(); + case DecommissionNodes: + return ScmContainerLocationResponse.newBuilder() + .setCmdType(request.getCmdType()) + .setStatus(Status.OK) + .setDecommissionNodesResponse(decommissionNodes( + request.getDecommissionNodesRequest())) + .build(); + case RecommissionNodes: + return ScmContainerLocationResponse.newBuilder() + .setCmdType(request.getCmdType()) + .setStatus(Status.OK) + .setRecommissionNodesResponse(recommissionNodes( + request.getRecommissionNodesRequest())) + .build(); + case StartMaintenanceNodes: + return ScmContainerLocationResponse.newBuilder() + .setCmdType(request.getCmdType()) + .setStatus(Status.OK) + .setStartMaintenanceNodesResponse(startMaintenanceNodes( + request.getStartMaintenanceNodesRequest())) + .build(); default: throw new IllegalArgumentException( "Unknown command type: " + request.getCmdType()); @@ -390,4 +423,25 @@ public ReplicationManagerStatusResponseProto getReplicationManagerStatus( .setIsRunning(impl.getReplicationManagerStatus()).build(); } + public DecommissionNodesResponseProto decommissionNodes( + DecommissionNodesRequestProto request) throws IOException { + impl.decommissionNodes(request.getHostsList()); + return DecommissionNodesResponseProto.newBuilder() + .build(); + } + + public RecommissionNodesResponseProto recommissionNodes( + RecommissionNodesRequestProto request) throws IOException { + impl.recommissionNodes(request.getHostsList()); + return RecommissionNodesResponseProto.newBuilder().build(); + } + + 
public StartMaintenanceNodesResponseProto startMaintenanceNodes( + StartMaintenanceNodesRequestProto request) throws IOException { + impl.startMaintenanceNodes(request.getHostsList(), + (int)request.getEndInHours()); + return StartMaintenanceNodesResponseProto.newBuilder() + .build(); + } + } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java index d9825074b218..38d846fb5fbd 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java @@ -365,6 +365,43 @@ public List queryNode(HddsProtos.NodeState state, } + @Override + public void decommissionNodes(List nodes) throws IOException { + String remoteUser = getRpcRemoteUsername(); + try { + getScm().checkAdminAccess(remoteUser); + scm.getScmDecommissionManager().decommissionNodes(nodes); + } catch (Exception ex) { + LOG.error("Failed to decommission nodes", ex); + throw ex; + } + } + + @Override + public void recommissionNodes(List nodes) throws IOException { + String remoteUser = getRpcRemoteUsername(); + try { + getScm().checkAdminAccess(remoteUser); + scm.getScmDecommissionManager().recommissionNodes(nodes); + } catch (Exception ex) { + LOG.error("Failed to recommission nodes", ex); + throw ex; + } + } + + @Override + public void startMaintenanceNodes(List nodes, int endInHours) + throws IOException { + String remoteUser = getRpcRemoteUsername(); + try { + getScm().checkAdminAccess(remoteUser); + scm.getScmDecommissionManager().startMaintenanceNodes(nodes, endInHours); + } catch (Exception ex) { + LOG.error("Failed to place nodes into maintenance mode", ex); + throw ex; + } + } + @Override public void notifyObjectStageChange(StorageContainerLocationProtocolProtos .ObjectStageChangeRequestProto.Type type, long id, diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java index 702102b5ba16..24572c7ab18c 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java @@ -74,6 +74,7 @@ import org.apache.hadoop.hdds.scm.node.NodeReportHandler; import org.apache.hadoop.hdds.scm.node.SCMNodeManager; import org.apache.hadoop.hdds.scm.node.StaleNodeHandler; +import org.apache.hadoop.hdds.scm.node.NodeDecommissionManager; import org.apache.hadoop.hdds.scm.pipeline.PipelineActionHandler; import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; import org.apache.hadoop.hdds.scm.pipeline.PipelineReportHandler; @@ -160,6 +161,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl private ContainerManager containerManager; private BlockManager scmBlockManager; private final SCMStorageConfig scmStorageConfig; + private NodeDecommissionManager scmDecommissionManager; private SCMMetadataStore scmMetadataStore; @@ -335,6 +337,9 @@ public StorageContainerManager(OzoneConfiguration conf, clientProtocolServer, scmBlockManager, replicationManager, pipelineManager); + scmDecommissionManager = new NodeDecommissionManager(conf, scmNodeManager, + pipelineManager, containerManager); + 
eventQueue.addHandler(SCMEvents.DATANODE_COMMAND, scmNodeManager); eventQueue.addHandler(SCMEvents.RETRIABLE_DATANODE_COMMAND, scmNodeManager); eventQueue.addHandler(SCMEvents.NODE_REPORT, nodeReportHandler); @@ -494,10 +499,12 @@ private void initalizeMetadataStore(OzoneConfiguration conf, */ private void loginAsSCMUser(Configuration conf) throws IOException, AuthenticationException { - LOG.debug("Ozone security is enabled. Attempting login for SCM user. " - + "Principal: {}, keytab: {}", - conf.get(HDDS_SCM_KERBEROS_PRINCIPAL_KEY), - conf.get(HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY)); + if (LOG.isDebugEnabled()) { + LOG.debug("Ozone security is enabled. Attempting login for SCM user. " + + "Principal: {}, keytab: {}", + conf.get(HDDS_SCM_KERBEROS_PRINCIPAL_KEY), + conf.get(HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY)); + } if (SecurityUtil.getAuthenticationMethod(conf).equals( AuthenticationMethod.KERBEROS)) { @@ -697,10 +704,10 @@ public void onRemoval( ContainerStat stat = removalNotification.getValue(); // remove invalid container report metrics.decrContainerStat(stat); - LOG.debug( - "Remove expired container stat entry for datanode: " + - "{}.", - removalNotification.getKey()); + if (LOG.isDebugEnabled()) { + LOG.debug("Remove expired container stat entry for " + + "datanode: {}.", removalNotification.getKey()); + } } } }) @@ -927,6 +934,16 @@ public int getNodeCount(NodeState nodestate) { return scmNodeManager.getNodeCount(null, nodestate); } + /** + * Returns the node decommission manager. + * + * @return NodeDecommissionManager The decommission manger for the used by + * scm + */ + public NodeDecommissionManager getScmDecommissionManager() { + return scmDecommissionManager; + } + /** * Returns SCM container manager. */ diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java index 20a8b747c950..25cf5049515c 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java @@ -262,10 +262,19 @@ public SCMNodeMetric getNodeStat(DatanodeDetails datanodeDetails) { * @return Healthy/Stale/Dead. */ @Override - public HddsProtos.NodeState getNodeState(DatanodeDetails dd) { + public NodeStatus getNodeStatus(DatanodeDetails dd) { return null; } + /** + * Set the operation state of a node. + * @param datanodeDetails The datanode to set the new state for + * @param newState The new operational state for the node + */ + public void setNodeOperationalState(DatanodeDetails datanodeDetails, + HddsProtos.NodeOperationalState newState) throws NodeNotFoundException { + } + /** * Get set of pipelines a datanode is part of. * @param dnId - datanodeID diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java new file mode 100644 index 000000000000..4492a6e3ceef --- /dev/null +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java @@ -0,0 +1,253 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm.node; + +import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.HddsTestUtils; +import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.server.StorageContainerManager; +import org.apache.hadoop.security.authentication.client.AuthenticationException; +import org.apache.hadoop.test.GenericTestUtils; +import org.junit.Before; +import org.junit.Test; +import java.io.IOException; +import java.util.List; +import java.util.UUID; +import java.util.Arrays; +import java.util.ArrayList; +import static junit.framework.TestCase.assertEquals; +import static org.assertj.core.api.Fail.fail; + +/** + * Unit tests for the decommision manager. + */ + +public class TestNodeDecommissionManager { + + private NodeDecommissionManager decom; + private StorageContainerManager scm; + private NodeManager nodeManager; + private OzoneConfiguration conf; + private String storageDir; + + @Before + public void setup() throws Exception { + conf = new OzoneConfiguration(); + storageDir = GenericTestUtils.getTempPath( + TestDeadNodeHandler.class.getSimpleName() + UUID.randomUUID()); + conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir); + nodeManager = createNodeManager(conf); + decom = new NodeDecommissionManager(conf, nodeManager, null, null); + } + + @Test + public void testHostStringsParseCorrectly() + throws InvalidHostStringException { + NodeDecommissionManager.HostDefinition def = + new NodeDecommissionManager.HostDefinition("foobar"); + assertEquals("foobar", def.getHostname()); + assertEquals(-1, def.getPort()); + + def = new NodeDecommissionManager.HostDefinition(" foobar "); + assertEquals("foobar", def.getHostname()); + assertEquals(-1, def.getPort()); + + def = new NodeDecommissionManager.HostDefinition("foobar:1234"); + assertEquals("foobar", def.getHostname()); + assertEquals(1234, def.getPort()); + + def = new NodeDecommissionManager.HostDefinition( + "foobar.mycompany.com:1234"); + assertEquals("foobar.mycompany.com", def.getHostname()); + assertEquals(1234, def.getPort()); + + try { + def = new NodeDecommissionManager.HostDefinition("foobar:abcd"); + fail("InvalidHostStringException should have been thrown"); + } catch (InvalidHostStringException e) { + } + } + + @Test + public void testAnyInvalidHostThrowsException() + throws InvalidHostStringException{ + List dns = generateDatanodes(); + + // Try to decommission a host that does exist, but give incorrect port + try { + decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress()+":10")); + fail("InvalidHostStringException expected"); + } catch (InvalidHostStringException e) { + } + + // Try to decommission a host that does not exist + try { + decom.decommissionNodes(Arrays.asList("123.123.123.123")); + fail("InvalidHostStringException expected"); + } catch (InvalidHostStringException e) { + } + + // Try to decommission a host that does exist and a host that does not + try { + decom.decommissionNodes(Arrays.asList( + dns.get(1).getIpAddress(), "123,123,123,123")); + 
fail("InvalidHostStringException expected"); + } catch (InvalidHostStringException e) { + } + + // Try to decommission a host with many DNs on the address with no port + try { + decom.decommissionNodes(Arrays.asList( + dns.get(0).getIpAddress())); + fail("InvalidHostStringException expected"); + } catch (InvalidHostStringException e) { + } + + // Try to decommission a host with many DNs on the address with a port + // that does not exist + try { + decom.decommissionNodes(Arrays.asList( + dns.get(0).getIpAddress()+":10")); + fail("InvalidHostStringException expected"); + } catch (InvalidHostStringException e) { + } + } + + @Test + public void testNodesCanBeDecommissionedAndRecommissioned() + throws InvalidHostStringException { + List dns = generateDatanodes(); + + // Decommission 2 valid nodes + decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress(), + dns.get(2).getIpAddress())); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, + nodeManager.getNodeStatus(dns.get(2)).getOperationalState()); + + // Running the command again gives no error - nodes already decommissioning + // are silently ignored. + decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress(), + dns.get(2).getIpAddress())); + + // Attempt to decommission dn(10) which has multiple hosts on the same IP + // and we hardcoded ports to 3456, 4567, 5678 + DatanodeDetails multiDn = dns.get(10); + String multiAddr = + multiDn.getIpAddress()+":"+multiDn.getPorts().get(0).getValue(); + decom.decommissionNodes(Arrays.asList(multiAddr)); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, + nodeManager.getNodeStatus(multiDn).getOperationalState()); + + // Recommission all 3 hosts + decom.recommissionNodes(Arrays.asList( + multiAddr, dns.get(1).getIpAddress(), dns.get(2).getIpAddress())); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(2)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(10)).getOperationalState()); + } + + @Test + public void testNodesCanBePutIntoMaintenanceAndRecommissioned() + throws InvalidHostStringException { + List dns = generateDatanodes(); + + // Put 2 valid nodes into maintenance + decom.startMaintenanceNodes(Arrays.asList(dns.get(1).getIpAddress(), + dns.get(2).getIpAddress()), 100); + assertEquals(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE, + nodeManager.getNodeStatus(dns.get(2)).getOperationalState()); + + // Running the command again gives no error - nodes already decommissioning + // are silently ignored. 
+ decom.startMaintenanceNodes(Arrays.asList(dns.get(1).getIpAddress(), + dns.get(2).getIpAddress()), 100); + + // Attempt to decommission dn(10) which has multiple hosts on the same IP + // and we hardcoded ports to 3456, 4567, 5678 + DatanodeDetails multiDn = dns.get(10); + String multiAddr = + multiDn.getIpAddress()+":"+multiDn.getPorts().get(0).getValue(); + decom.startMaintenanceNodes(Arrays.asList(multiAddr), 100); + assertEquals(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE, + nodeManager.getNodeStatus(multiDn).getOperationalState()); + + // Recommission all 3 hosts + decom.recommissionNodes(Arrays.asList( + multiAddr, dns.get(1).getIpAddress(), dns.get(2).getIpAddress())); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(2)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(10)).getOperationalState()); + } + + private SCMNodeManager createNodeManager(OzoneConfiguration config) + throws IOException, AuthenticationException { + scm = HddsTestUtils.getScm(config); + return (SCMNodeManager) scm.getScmNodeManager(); + } + + /** + * Generate a list of random DNs and return the list. A total of 11 DNs will + * be generated and registered with the node manager. Index 0 and 10 will + * have the same IP and host and the rest will have unique IPs and Hosts. + * The DN at index 10, has 3 hard coded ports of 3456, 4567, 5678. All other + * DNs will have ports set to 0. + * @return The list of DatanodeDetails Generated + */ + private List generateDatanodes() { + List dns = new ArrayList<>(); + for (int i=0; i<10; i++) { + DatanodeDetails dn = TestUtils.randomDatanodeDetails(); + dns.add(dn); + nodeManager.register(dn, null, null); + } + // We have 10 random DNs, we want to create another one that is on the same + // host as some of the others. 
+ DatanodeDetails multiDn = dns.get(0); + + DatanodeDetails.Builder builder = DatanodeDetails.newBuilder(); + builder.setUuid(UUID.randomUUID().toString()) + .setHostName(multiDn.getHostName()) + .setIpAddress(multiDn.getIpAddress()) + .addPort(DatanodeDetails.newPort( + DatanodeDetails.Port.Name.STANDALONE, 3456)) + .addPort(DatanodeDetails.newPort( + DatanodeDetails.Port.Name.RATIS, 4567)) + .addPort(DatanodeDetails.newPort( + DatanodeDetails.Port.Name.REST, 5678)) + .setNetworkLocation(multiDn.getNetworkLocation()); + + DatanodeDetails dn = builder.build(); + nodeManager.register(dn, null, null); + dns.add(dn); + return dns; + } + +} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeStateManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeStateManager.java index bc28a438892a..9fbf2519f5e1 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeStateManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeStateManager.java @@ -20,6 +20,7 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; import org.apache.hadoop.hdds.scm.HddsServerUtil; import org.apache.hadoop.hdds.scm.node.states.NodeAlreadyExistsException; @@ -185,6 +186,22 @@ public void testNodeCanTransitionThroughHealthStatesAndFiresEvents() eventPublisher.getLastEvent().getName()); } + @Test + public void testNodeOpStateCanBeSet() + throws NodeAlreadyExistsException, NodeNotFoundException { + DatanodeDetails dn = generateDatanode(); + nsm.addNode(dn); + + nsm.setNodeOperationalState(dn, + HddsProtos.NodeOperationalState.DECOMMISSIONED); + + NodeStatus newStatus = nsm.getNodeStatus(dn); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONED, + newStatus.getOperationalState()); + assertEquals(NodeState.HEALTHY, + newStatus.getHealth()); + } + private DatanodeDetails generateDatanode() { String uuid = UUID.randomUUID().toString(); return DatanodeDetails.newBuilder().setUuid(uuid).build(); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java index a48b2a05ebb2..5a2e3c2109ac 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java @@ -52,17 +52,17 @@ * A Node Manager to test replication. */ public class ReplicationNodeManagerMock implements NodeManager { - private final Map nodeStateMap; + private final Map nodeStateMap; private final CommandQueue commandQueue; /** * A list of Datanodes and current states. - * @param nodeState A node state map. + * @param nodeStatus A node state map. */ - public ReplicationNodeManagerMock(Map nodeState, + public ReplicationNodeManagerMock(Map nodeStatus, CommandQueue commandQueue) { - Preconditions.checkNotNull(nodeState); - this.nodeStateMap = nodeState; + Preconditions.checkNotNull(nodeStatus); + this.nodeStateMap = nodeStatus; this.commandQueue = commandQueue; } @@ -179,10 +179,26 @@ public SCMNodeMetric getNodeStat(DatanodeDetails dd) { * @return Healthy/Stale/Dead. 
*/ @Override - public NodeState getNodeState(DatanodeDetails dd) { + public NodeStatus getNodeStatus(DatanodeDetails dd) { return nodeStateMap.get(dd); } + /** + * Set the operation state of a node. + * @param dd The datanode to set the new state for + * @param newState The new operational state for the node + */ + @Override + public void setNodeOperationalState(DatanodeDetails dd, + HddsProtos.NodeOperationalState newState) throws NodeNotFoundException { + NodeStatus currentStatus = nodeStateMap.get(dd); + if (currentStatus != null) { + nodeStateMap.put(dd, new NodeStatus(newState, currentStatus.getHealth())); + } else { + throw new NodeNotFoundException(); + } + } + /** * Get set of pipelines a datanode is part of. * @param dnId - datanodeID @@ -313,10 +329,10 @@ public void clearMap() { * Adds a node to the existing Node manager. This is used only for test * purposes. * @param id DatanodeDetails - * @param state State you want to put that node to. + * @param status State you want to put that node to. */ - public void addNode(DatanodeDetails id, NodeState state) { - nodeStateMap.put(id, state); + public void addNode(DatanodeDetails id, NodeStatus status) { + nodeStateMap.put(id, status); } @Override diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java index ff30eca470e0..8649992971fb 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java @@ -25,10 +25,12 @@ import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.cli.container.ContainerCommands; import org.apache.hadoop.hdds.scm.cli.pipeline.PipelineCommands; +import org.apache.hadoop.hdds.scm.cli.node.DatanodeAdminCommands; import org.apache.hadoop.hdds.scm.client.ContainerOperationClient; import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.hdds.scm.container.ContainerInfo; @@ -36,17 +38,20 @@ import org.apache.hadoop.hdds.scm.protocolPB .StorageContainerLocationProtocolClientSideTranslatorPB; import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB; +import org.apache.hadoop.hdds.security.x509.SecurityConfig; import org.apache.hadoop.hdds.tracing.TracingUtil; import org.apache.hadoop.ipc.Client; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.OzoneSecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.NativeCodeLoader; import org.apache.commons.lang3.StringUtils; import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients; +import static org.apache.hadoop.hdds.HddsUtils.getScmSecurityClient; import static org.apache.hadoop.hdds.scm.ScmConfigKeys .OZONE_SCM_CLIENT_ADDRESS_KEY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE; @@ -76,7 +81,8 @@ ContainerCommands.class, PipelineCommands.class, TopologySubcommand.class, - ReplicationManagerCommands.class + ReplicationManagerCommands.class, + DatanodeAdminCommands.class }, mixinStandardHelpOptions 
= true) public class SCMCLI extends GenericCli { @@ -136,8 +142,21 @@ public ScmClient createScmClient() NetUtils.getDefaultSocketFactory(ozoneConf), Client.getRpcTimeout(ozoneConf))), StorageContainerLocationProtocol.class, ozoneConf); - return new ContainerOperationClient( - client, new XceiverClientManager(ozoneConf)); + + XceiverClientManager xceiverClientManager = null; + if (OzoneSecurityUtil.isSecurityEnabled(ozoneConf)) { + SecurityConfig securityConfig = new SecurityConfig(ozoneConf); + SCMSecurityProtocol scmSecurityProtocolClient = getScmSecurityClient( + (OzoneConfiguration) securityConfig.getConfiguration()); + String caCertificate = + scmSecurityProtocolClient.getCACertificate(); + xceiverClientManager = new XceiverClientManager(ozoneConf, + OzoneConfiguration.of(ozoneConf).getObject(XceiverClientManager + .ScmClientConfig.class), caCertificate); + } else { + xceiverClientManager = new XceiverClientManager(ozoneConf); + } + return new ContainerOperationClient(client, xceiverClientManager); } public void checkContainerExists(ScmClient scmClient, long containerId) diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java index 288d9faf8017..5169c8077990 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java @@ -24,7 +24,6 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.ozone.web.utils.JsonUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -61,8 +60,7 @@ public class ListSubcommand implements Callable { private void outputContainerInfo(ContainerInfo containerInfo) throws IOException { // Print container report info. - LOG.info("{}", JsonUtils.toJsonStringWithDefaultPrettyPrinter( - containerInfo.toJsonString())); + LOG.info("{}", containerInfo.toJsonString()); } @Override diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/node/DatanodeAdminCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/node/DatanodeAdminCommands.java new file mode 100644 index 000000000000..dc7a6f4e4f1e --- /dev/null +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/node/DatanodeAdminCommands.java @@ -0,0 +1,55 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm.cli.node; + +import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.cli.MissingSubcommandException; +import picocli.CommandLine.Command; +import picocli.CommandLine.ParentCommand; +import org.apache.hadoop.hdds.scm.cli.SCMCLI; + +import java.util.concurrent.Callable; + +/** + * Subcommand to group datanode admin related operations. + */ +@Command( + name = "dnadmin", + description = "Datanode Administration specific operations", + mixinStandardHelpOptions = true, + versionProvider = HddsVersionProvider.class, + subcommands = { + DatanodeAdminDecommissionSubCommand.class, + DatanodeAdminMaintenanceSubCommand.class, + DatanodeAdminRecommissionSubCommand.class + }) +public class DatanodeAdminCommands implements Callable { + + @ParentCommand + private SCMCLI parent; + + public SCMCLI getParent() { + return parent; + } + + @Override + public Void call() throws Exception { + throw new MissingSubcommandException( + this.parent.getCmd().getSubcommands().get("nodeadmin")); + } +} \ No newline at end of file diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/node/DatanodeAdminDecommissionSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/node/DatanodeAdminDecommissionSubCommand.java new file mode 100644 index 000000000000..1406603eed34 --- /dev/null +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/node/DatanodeAdminDecommissionSubCommand.java @@ -0,0 +1,58 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm.cli.node; + +import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.scm.client.ScmClient; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import picocli.CommandLine; +import picocli.CommandLine.Command; +import picocli.CommandLine.ParentCommand; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.Callable; + +/** + * Decommission one or more datanodes. + */ +@Command( + name = "decommission", + description = "Decommission a datanode", + mixinStandardHelpOptions = true, + versionProvider = HddsVersionProvider.class) +public class DatanodeAdminDecommissionSubCommand implements Callable { + + private static final Logger LOG = + LoggerFactory.getLogger(DatanodeAdminDecommissionSubCommand.class); + + @CommandLine.Parameters(description = "List of fully qualified host names") + private List hosts = new ArrayList(); + + @ParentCommand + private DatanodeAdminCommands parent; + + @Override + public Void call() throws Exception { + try (ScmClient scmClient = parent.getParent().createScmClient()) { + scmClient.decommissionNodes(hosts); + return null; + } + } +} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/node/DatanodeAdminMaintenanceSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/node/DatanodeAdminMaintenanceSubCommand.java new file mode 100644 index 000000000000..2e4b2b51fd4f --- /dev/null +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/node/DatanodeAdminMaintenanceSubCommand.java @@ -0,0 +1,63 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm.cli.node; + +import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.scm.client.ScmClient; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import picocli.CommandLine; +import picocli.CommandLine.Command; +import picocli.CommandLine.ParentCommand; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.Callable; + +/** + * Place one or more datanodes into Maintenance Mode. + */ +@Command( + name = "maintenance", + description = "Put a datanode into Maintenance Mode", + mixinStandardHelpOptions = true, + versionProvider = HddsVersionProvider.class) +public class DatanodeAdminMaintenanceSubCommand implements Callable { + + private static final Logger LOG = + LoggerFactory.getLogger(DatanodeAdminMaintenanceSubCommand.class); + + @CommandLine.Parameters(description = "List of fully qualified host names") + private List hosts = new ArrayList(); + + @CommandLine.Option(names = {"--end"}, + description = "Automatically end maintenance after the given hours. "+ + "By default, maintenance must be ended manually.") + private int endInHours = 0; + + @ParentCommand + private DatanodeAdminCommands parent; + + @Override + public Void call() throws Exception { + try (ScmClient scmClient = parent.getParent().createScmClient()) { + scmClient.startMaintenanceNodes(hosts, endInHours); + return null; + } + } +} \ No newline at end of file diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/node/DatanodeAdminRecommissionSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/node/DatanodeAdminRecommissionSubCommand.java new file mode 100644 index 000000000000..eaa1280ae49d --- /dev/null +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/node/DatanodeAdminRecommissionSubCommand.java @@ -0,0 +1,58 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm.cli.node; + +import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.scm.client.ScmClient; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import picocli.CommandLine; +import picocli.CommandLine.Command; +import picocli.CommandLine.ParentCommand; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.Callable; + +/** + * Place decommissioned or maintenance nodes back into service. + */ +@Command( + name = "recommission", + description = "Return a datanode to service", + mixinStandardHelpOptions = true, + versionProvider = HddsVersionProvider.class) +public class DatanodeAdminRecommissionSubCommand implements Callable { + + private static final Logger LOG = + LoggerFactory.getLogger(DatanodeAdminRecommissionSubCommand.class); + + @CommandLine.Parameters(description = "List of fully qualified host names") + private List hosts = new ArrayList(); + + @ParentCommand + private DatanodeAdminCommands parent; + + @Override + public Void call() throws Exception { + try (ScmClient scmClient = parent.getParent().createScmClient()) { + scmClient.recommissionNodes(hosts); + return null; + } + } +} \ No newline at end of file diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/node/package-info.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/node/package-info.java new file mode 100644 index 000000000000..dfb04b828765 --- /dev/null +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/node/package-info.java @@ -0,0 +1,23 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + *
+ * SCM related cli tools. + */ +/** + * SCM related cli tools for Datanode Admin. + */ +package org.apache.hadoop.hdds.scm.cli.node; diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java index e707e4fea077..b179ca539569 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java @@ -18,6 +18,7 @@ */ package org.apache.hadoop.ozone.client.io; +import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -100,10 +101,17 @@ public BlockOutputStreamEntryPool(OzoneManagerProtocol omClient, Preconditions.checkState(streamBufferMaxSize % streamBufferFlushSize == 0); Preconditions.checkState(blockSize % streamBufferMaxSize == 0); this.bufferPool = - new BufferPool(chunkSize, (int) streamBufferMaxSize / chunkSize); + new BufferPool(chunkSize, (int) streamBufferMaxSize / chunkSize, + xceiverClientManager.byteBufferToByteStringConversion()); } - public BlockOutputStreamEntryPool() { + /** + * A constructor for testing purpose only. + * + * @see KeyOutputStream#KeyOutputStream() + */ + @VisibleForTesting + BlockOutputStreamEntryPool() { streamEntries = new ArrayList<>(); omClient = null; keyArgs = null; @@ -185,10 +193,12 @@ public List getLocationInfoList() { .setPipeline(streamEntry.getPipeline()).build(); locationInfoList.add(info); } - LOG.debug( - "block written " + streamEntry.getBlockID() + ", length " + length - + " bcsID " + streamEntry.getBlockID() - .getBlockCommitSequenceId()); + if (LOG.isDebugEnabled()) { + LOG.debug( + "block written " + streamEntry.getBlockID() + ", length " + length + + " bcsID " + streamEntry.getBlockID() + .getBlockCommitSequenceId()); + } } return locationInfoList; } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java index fa1672a1fa7d..ecbb3290a7dc 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java @@ -97,8 +97,10 @@ private synchronized void initialize(String keyName, long keyLength = 0; for (int i = 0; i < blockInfos.size(); i++) { OmKeyLocationInfo omKeyLocationInfo = blockInfos.get(i); - LOG.debug("Adding stream for accessing {}. The stream will be " + - "initialized later.", omKeyLocationInfo); + if (LOG.isDebugEnabled()) { + LOG.debug("Adding stream for accessing {}. 
The stream will be " + + "initialized later.", omKeyLocationInfo); + } addStream(omKeyLocationInfo, xceiverClientManager, verifyChecksum); diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index ebe4477955a5..06351ab2c3d0 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -30,7 +30,6 @@ import org.apache.hadoop.hdds.protocol.StorageType; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos .ChecksumType; -import org.apache.hadoop.hdds.scm.ByteStringHelper; import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.hdds.tracing.TracingUtil; import org.apache.hadoop.io.IOUtils; @@ -219,10 +218,6 @@ public RpcClient(Configuration conf, String omServiceId) throws IOException { OzoneConfigKeys.OZONE_CLIENT_RETRY_INTERVAL, OzoneConfigKeys.OZONE_CLIENT_RETRY_INTERVAL_DEFAULT); dtService = getOMProxyProvider().getCurrentProxyDelegationToken(); - boolean isUnsafeByteOperationsEnabled = conf.getBoolean( - OzoneConfigKeys.OZONE_UNSAFEBYTEOPERATIONS_ENABLED, - OzoneConfigKeys.OZONE_UNSAFEBYTEOPERATIONS_ENABLED_DEFAULT); - ByteStringHelper.init(isUnsafeByteOperationsEnabled); topologyAwareReadEnabled = conf.getBoolean( OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_DEFAULT); @@ -444,10 +439,14 @@ public Token getDelegationToken(Text renewer) ozoneManagerClient.getDelegationToken(renewer); if (token != null) { token.setService(dtService); - LOG.debug("Created token {} for dtService {}", token, dtService); + if (LOG.isDebugEnabled()) { + LOG.debug("Created token {} for dtService {}", token, dtService); + } } else { - LOG.debug("Cannot get ozone delegation token for renewer {} to access " + - "service {}", renewer, dtService); + if (LOG.isDebugEnabled()) { + LOG.debug("Cannot get ozone delegation token for renewer {} to " + + "access service {}", renewer, dtService); + } } return token; } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java index 7cd38ad32bac..8e129c9d2301 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java @@ -18,14 +18,13 @@ package org.apache.hadoop.ozone; import com.google.common.base.Joiner; -import java.io.BufferedInputStream; -import java.io.BufferedOutputStream; import java.io.File; import java.io.FileInputStream; -import java.io.FileOutputStream; import java.io.IOException; +import java.io.OutputStream; import java.net.InetSocketAddress; import java.nio.charset.StandardCharsets; +import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.security.MessageDigest; @@ -34,16 +33,22 @@ import java.util.Collection; import java.util.Collections; import java.util.Optional; -import java.util.zip.GZIPOutputStream; +import java.util.stream.Collectors; import com.google.common.base.Strings; -import org.apache.commons.compress.archivers.tar.TarArchiveEntry; + +import org.apache.commons.compress.archivers.ArchiveEntry; +import org.apache.commons.compress.archivers.ArchiveOutputStream; import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream; +import 
org.apache.commons.compress.compressors.CompressorException; +import org.apache.commons.compress.compressors.CompressorOutputStream; +import org.apache.commons.compress.compressors.CompressorStreamFactory; import org.apache.commons.compress.utils.IOUtils; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.scm.HddsServerUtil; import org.apache.hadoop.hdds.server.ServerUtils; +import org.apache.hadoop.hdds.utils.db.DBCheckpoint; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -346,61 +351,51 @@ public static Collection emptyAsSingletonNull(Collection } /** - * Given a source directory, create a tar.gz file from it. - * - * @param sourcePath the path to the directory to be archived. - * @return tar.gz file + * Write OM DB Checkpoint to an output stream as a compressed file (tgz). + * @param checkpoint checkpoint file + * @param destination desination output stream. * @throws IOException */ - public static File createTarFile(Path sourcePath) throws IOException { - TarArchiveOutputStream tarOs = null; - try { - String sourceDir = sourcePath.toString(); - String fileName = sourceDir.concat(".tar.gz"); - FileOutputStream fileOutputStream = new FileOutputStream(fileName); - GZIPOutputStream gzipOutputStream = - new GZIPOutputStream(new BufferedOutputStream(fileOutputStream)); - tarOs = new TarArchiveOutputStream(gzipOutputStream); - File folder = new File(sourceDir); - File[] filesInDir = folder.listFiles(); - if (filesInDir != null) { - for (File file : filesInDir) { - addFilesToArchive(file.getName(), file, tarOs); + public static void writeOmDBCheckpointToStream(DBCheckpoint checkpoint, + OutputStream destination) + throws IOException { + + try (CompressorOutputStream gzippedOut = new CompressorStreamFactory() + .createCompressorOutputStream(CompressorStreamFactory.GZIP, + destination)) { + + try (ArchiveOutputStream archiveOutputStream = + new TarArchiveOutputStream(gzippedOut)) { + + Path checkpointPath = checkpoint.getCheckpointLocation(); + for (Path path : Files.list(checkpointPath) + .collect(Collectors.toList())) { + if (path != null) { + Path fileName = path.getFileName(); + if (fileName != null) { + includeFile(path.toFile(), fileName.toString(), + archiveOutputStream); + } + } } } - return new File(fileName); - } finally { - try { - org.apache.hadoop.io.IOUtils.closeStream(tarOs); - } catch (Exception e) { - LOG.error("Exception encountered when closing " + - "TAR file output stream: " + e); - } + } catch (CompressorException e) { + throw new IOException( + "Can't compress the checkpoint: " + + checkpoint.getCheckpointLocation(), e); } } - private static void addFilesToArchive(String source, File file, - TarArchiveOutputStream - tarFileOutputStream) + private static void includeFile(File file, String entryName, + ArchiveOutputStream archiveOutputStream) throws IOException { - tarFileOutputStream.putArchiveEntry(new TarArchiveEntry(file, source)); - if (file.isFile()) { - FileInputStream fileInputStream = new FileInputStream(file); - BufferedInputStream bufferedInputStream = - new BufferedInputStream(fileInputStream); - IOUtils.copy(bufferedInputStream, tarFileOutputStream); - tarFileOutputStream.closeArchiveEntry(); - fileInputStream.close(); - } else if (file.isDirectory()) { - tarFileOutputStream.closeArchiveEntry(); - File[] filesInDir = file.listFiles(); - if (filesInDir != null) { - for (File cFile : filesInDir) { - 
addFilesToArchive(cFile.getAbsolutePath(), cFile, - tarFileOutputStream); - } - } + ArchiveEntry archiveEntry = + archiveOutputStream.createArchiveEntry(file, entryName); + archiveOutputStream.putArchiveEntry(archiveEntry); + try (FileInputStream fis = new FileInputStream(file)) { + IOUtils.copy(fis, archiveOutputStream); } + archiveOutputStream.closeArchiveEntry(); } /** diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java index 2fdf543f31be..fb5665820628 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java @@ -75,7 +75,9 @@ public S3SecretValue getS3Secret(String kerberosID) throws IOException { } finally { omMetadataManager.getLock().releaseLock(S3_SECRET_LOCK, kerberosID); } - LOG.trace("Secret for accessKey:{}, proto:{}", kerberosID, result); + if (LOG.isTraceEnabled()) { + LOG.trace("Secret for accessKey:{}, proto:{}", kerberosID, result); + } return result; } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java index 62d8fdc2613a..32684de5b73f 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java @@ -214,8 +214,10 @@ private Text computeDelegationTokenService() { @Override public void performFailover(OzoneManagerProtocolPB currentProxy) { int newProxyIndex = incrementProxyIndex(); - LOG.debug("Failing over OM proxy to index: {}, nodeId: {}", - newProxyIndex, omNodeIDList.get(newProxyIndex)); + if (LOG.isDebugEnabled()) { + LOG.debug("Failing over OM proxy to index: {}, nodeId: {}", + newProxyIndex, omNodeIDList.get(newProxyIndex)); + } } /** diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OMRatisHelper.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OMRatisHelper.java index bc64d6c5a1fd..c1930c85d03f 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OMRatisHelper.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OMRatisHelper.java @@ -61,7 +61,9 @@ private OMRatisHelper() { */ public static RaftClient newRaftClient(RpcType rpcType, String omId, RaftGroup group, RetryPolicy retryPolicy, Configuration conf) { - LOG.trace("newRaftClient: {}, leader={}, group={}", rpcType, omId, group); + if (LOG.isTraceEnabled()) { + LOG.trace("newRaftClient: {}, leader={}, group={}", rpcType, omId, group); + } final RaftProperties properties = new RaftProperties(); RaftConfigKeys.Rpc.setType(properties, rpcType); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java index 957437f17806..31f092446234 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java @@ -21,6 +21,7 @@ import java.util.ArrayList; import java.util.List; +import java.util.function.Consumer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -28,6 +29,9 @@ import 
org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ozone.lock.LockManager; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_MANAGER_FAIR_LOCK_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_MANAGER_FAIR_LOCK; + /** * Provides different locks to handle concurrency in OzoneMaster. * We also maintain lock hierarchy, based on the weight. @@ -75,6 +79,9 @@ public class OzoneManagerLock { private static final Logger LOG = LoggerFactory.getLogger(OzoneManagerLock.class); + private static final String READ_LOCK = "read"; + private static final String WRITE_LOCK = "write"; + private final LockManager manager; private final ThreadLocal lockSet = ThreadLocal.withInitial( () -> Short.valueOf((short)0)); @@ -85,7 +92,9 @@ public class OzoneManagerLock { * @param conf Configuration object */ public OzoneManagerLock(Configuration conf) { - manager = new LockManager<>(conf); + boolean fair = conf.getBoolean(OZONE_MANAGER_FAIR_LOCK, + OZONE_MANAGER_FAIR_LOCK_DEFAULT); + manager = new LockManager<>(conf, fair); } /** @@ -105,16 +114,69 @@ public OzoneManagerLock(Configuration conf) { * should be bucket name. For remaining all resource only one param should * be passed. */ + @Deprecated public boolean acquireLock(Resource resource, String... resources) { String resourceName = generateResourceName(resource, resources); + return lock(resource, resourceName, manager::writeLock, WRITE_LOCK); + } + + /** + * Acquire read lock on resource. + * + * For S3_BUCKET_LOCK, VOLUME_LOCK, BUCKET_LOCK type resource, same + * thread acquiring lock again is allowed. + * + * For USER_LOCK, PREFIX_LOCK, S3_SECRET_LOCK type resource, same thread + * acquiring lock again is not allowed. + * + * Special Note for USER_LOCK: Single thread can acquire single user lock/ + * multi user lock. But not both at the same time. + * @param resource - Type of the resource. + * @param resources - Resource names on which user want to acquire lock. + * For Resource type BUCKET_LOCK, first param should be volume, second param + * should be bucket name. For remaining all resource only one param should + * be passed. + */ + public boolean acquireReadLock(Resource resource, String... resources) { + String resourceName = generateResourceName(resource, resources); + return lock(resource, resourceName, manager::readLock, READ_LOCK); + } + + + /** + * Acquire write lock on resource. + * + * For S3_BUCKET_LOCK, VOLUME_LOCK, BUCKET_LOCK type resource, same + * thread acquiring lock again is allowed. + * + * For USER_LOCK, PREFIX_LOCK, S3_SECRET_LOCK type resource, same thread + * acquiring lock again is not allowed. + * + * Special Note for USER_LOCK: Single thread can acquire single user lock/ + * multi user lock. But not both at the same time. + * @param resource - Type of the resource. + * @param resources - Resource names on which user want to acquire lock. + * For Resource type BUCKET_LOCK, first param should be volume, second param + * should be bucket name. For remaining all resource only one param should + * be passed. + */ + public boolean acquireWriteLock(Resource resource, String... 
resources) { + String resourceName = generateResourceName(resource, resources); + return lock(resource, resourceName, manager::writeLock, WRITE_LOCK); + } + + private boolean lock(Resource resource, String resourceName, + Consumer lockFn, String lockType) { if (!resource.canLock(lockSet.get())) { String errorMessage = getErrorMessage(resource); LOG.error(errorMessage); throw new RuntimeException(errorMessage); } else { - manager.lock(resourceName); - LOG.debug("Acquired {} lock on resource {}", resource.name, - resourceName); + lockFn.accept(resourceName); + if (LOG.isDebugEnabled()) { + LOG.debug("Acquired {} {} lock on resource {}", lockType, resource.name, + resourceName); + } lockSet.set(resource.setLock(lockSet.get())); return true; } @@ -197,20 +259,22 @@ public boolean acquireMultiUserLock(String firstUser, String secondUser) { if (compare == 0) { // both users are equal. - manager.lock(firstUser); + manager.writeLock(firstUser); } else { - manager.lock(firstUser); + manager.writeLock(firstUser); try { - manager.lock(secondUser); + manager.writeLock(secondUser); } catch (Exception ex) { // We got an exception acquiring 2nd user lock. Release already // acquired user lock, and throw exception to the user. - manager.unlock(firstUser); + manager.writeUnlock(firstUser); throw ex; } } - LOG.debug("Acquired {} lock on resource {} and {}", resource.name, - firstUser, secondUser); + if (LOG.isDebugEnabled()) { + LOG.debug("Acquired Write {} lock on resource {} and {}", resource.name, + firstUser, secondUser); + } lockSet.set(resource.setLock(lockSet.get())); return true; } @@ -240,35 +304,70 @@ public void releaseMultiUserLock(String firstUser, String secondUser) { if (compare == 0) { // both users are equal. - manager.unlock(firstUser); + manager.writeUnlock(firstUser); } else { - manager.unlock(firstUser); - manager.unlock(secondUser); + manager.writeUnlock(firstUser); + manager.writeUnlock(secondUser); + } + if (LOG.isDebugEnabled()) { + LOG.debug("Release Write {} lock on resource {} and {}", resource.name, + firstUser, secondUser); } - LOG.debug("Release {} lock on resource {} and {}", resource.name, - firstUser, secondUser); lockSet.set(resource.clearLock(lockSet.get())); } /** - * Release lock on resource. + * Release write lock on resource. * @param resource - Type of the resource. * @param resources - Resource names on which user want to acquire lock. * For Resource type BUCKET_LOCK, first param should be volume, second param * should be bucket name. For remaining all resource only one param should * be passed. */ + public void releaseWriteLock(Resource resource, String... resources) { + String resourceName = generateResourceName(resource, resources); + unlock(resource, resourceName, manager::writeUnlock, WRITE_LOCK); + } + + /** + * Release read lock on resource. + * @param resource - Type of the resource. + * @param resources - Resource names on which user want to acquire lock. + * For Resource type BUCKET_LOCK, first param should be volume, second param + * should be bucket name. For remaining all resource only one param should + * be passed. + */ + public void releaseReadLock(Resource resource, String... resources) { + String resourceName = generateResourceName(resource, resources); + unlock(resource, resourceName, manager::readUnlock, READ_LOCK); + } + + /** + * Release write lock on resource. + * @param resource - Type of the resource. + * @param resources - Resource names on which user want to acquire lock. 
+ * For Resource type BUCKET_LOCK, first param should be volume, second param + * should be bucket name. For remaining all resource only one param should + * be passed. + */ + @Deprecated public void releaseLock(Resource resource, String... resources) { String resourceName = generateResourceName(resource, resources); + unlock(resource, resourceName, manager::writeUnlock, WRITE_LOCK); + } + + private void unlock(Resource resource, String resourceName, + Consumer lockFn, String lockType) { // TODO: Not checking release of higher order level lock happened while // releasing lower order level lock, as for that we need counter for // locks, as some locks support acquiring lock again. - manager.unlock(resourceName); + lockFn.accept(resourceName); // clear lock - LOG.debug("Release {}, lock on resource {}", resource.name, - resource.name, resourceName); + if (LOG.isDebugEnabled()) { + LOG.debug("Release {} {}, lock on resource {}", lockType, resource.name, + resourceName); + } lockSet.set(resource.clearLock(lockSet.get())); - } /** diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneBlockTokenSecretManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneBlockTokenSecretManager.java index b3f607a9c361..5cc782336a85 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneBlockTokenSecretManager.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneBlockTokenSecretManager.java @@ -89,7 +89,7 @@ public Token generateToken(String user, if (LOG.isTraceEnabled()) { long expiryTime = tokenIdentifier.getExpiryDate(); String tokenId = tokenIdentifier.toString(); - LOG.trace("Issued delegation token -> expiryTime:{},tokenId:{}", + LOG.trace("Issued delegation token -> expiryTime:{}, tokenId:{}", expiryTime, tokenId); } // Pass blockId as service. 
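For context on the OzoneManagerLock changes above: the new acquire/release read and write variants are meant to be paired per resource, with BUCKET_LOCK taking the volume name first and the bucket name second. The following is a minimal usage sketch only, not part of the patch; it assumes Resource is the enum nested in OzoneManagerLock, and the sketch class plus the volume/bucket names are illustrative.

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.om.lock.OzoneManagerLock;
    import org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource;

    public class OmLockUsageSketch {
      public static void main(String[] args) {
        OzoneManagerLock lock = new OzoneManagerLock(new OzoneConfiguration());

        // Shared lock for readers; for BUCKET_LOCK the first argument is the
        // volume name and the second is the bucket name.
        if (lock.acquireReadLock(Resource.BUCKET_LOCK, "vol1", "bucket1")) {
          try {
            // read-only access to /vol1/bucket1 metadata goes here
          } finally {
            lock.releaseReadLock(Resource.BUCKET_LOCK, "vol1", "bucket1");
          }
        }

        // Exclusive lock for writers; the deprecated acquireLock()/releaseLock()
        // pair maps to the write lock internally, so existing callers keep
        // exclusive semantics until they migrate.
        if (lock.acquireWriteLock(Resource.BUCKET_LOCK, "vol1", "bucket1")) {
          try {
            // mutate /vol1/bucket1 metadata here
          } finally {
            lock.releaseWriteLock(Resource.BUCKET_LOCK, "vol1", "bucket1");
          }
        }
      }
    }

Since the lock state is tracked in a per-thread lock set, each acquire should normally be released on the same thread, ideally in a finally block as sketched above.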
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java index 7e03095cdc45..0de8ac63c3f0 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java @@ -289,8 +289,10 @@ public OzoneTokenIdentifier cancelToken(Token token, String canceller) throws IOException { OzoneTokenIdentifier id = OzoneTokenIdentifier.readProtoBuf( token.getIdentifier()); - LOG.debug("Token cancellation requested for identifier: {}", - formatTokenId(id)); + if (LOG.isDebugEnabled()) { + LOG.debug("Token cancellation requested for identifier: {}", + formatTokenId(id)); + } if (id.getUser() == null) { throw new InvalidToken("Token with no owner " + formatTokenId(id)); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSelector.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSelector.java index dd2ab1fa2e50..68afaaf52b81 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSelector.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSelector.java @@ -43,9 +43,13 @@ public OzoneDelegationTokenSelector() { @Override public Token selectToken(Text service, Collection> tokens) { - LOG.trace("Getting token for service {}", service); + if (LOG.isTraceEnabled()) { + LOG.trace("Getting token for service {}", service); + } Token token = getSelectedTokens(service, tokens); - LOG.debug("Got tokens: {} for service {}", token, service); + if (LOG.isDebugEnabled()) { + LOG.debug("Got tokens: {} for service {}", token, service); + } return token; } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretManager.java index 78f0565b81dc..06fc071f32dd 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretManager.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretManager.java @@ -110,8 +110,10 @@ public byte[] createPassword(byte[] identifier, PrivateKey privateKey) @Override public byte[] createPassword(T identifier) { - logger.debug("Creating password for identifier: {}, currentKey: {}", - formatTokenId(identifier), currentKey.getKeyId()); + if (logger.isDebugEnabled()) { + logger.debug("Creating password for identifier: {}, currentKey: {}", + formatTokenId(identifier), currentKey.getKeyId()); + } byte[] password = null; try { password = createPassword(identifier.getBytes(), diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOmUtils.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOmUtils.java index a788d0cb3a5f..ce743fead31a 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOmUtils.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOmUtils.java @@ -19,31 +19,39 @@ package org.apache.hadoop.ozone; import org.apache.commons.io.FileUtils; +import org.apache.hadoop.hdds.utils.db.DBCheckpoint; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.conf.Configuration; import 
org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.junit.Assert; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; +import org.junit.rules.TemporaryFolder; import org.junit.rules.Timeout; import java.io.File; import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.FileWriter; +import java.io.IOException; +import java.nio.file.Path; import java.nio.file.Paths; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; /** * Unit tests for {@link OmUtils}. */ public class TestOmUtils { + + @Rule + public TemporaryFolder folder = new TemporaryFolder(); + @Rule public Timeout timeout = new Timeout(60_000); @@ -96,22 +104,13 @@ public void testNoOmDbDirConfigured() { } @Test - public void testCreateTarFile() throws Exception { + public void testWriteCheckpointToOutputStream() throws Exception { - File tempSnapshotDir = null; FileInputStream fis = null; FileOutputStream fos = null; - File tarFile = null; try { - String testDirName = System.getProperty("java.io.tmpdir"); - if (!testDirName.endsWith("/")) { - testDirName += "/"; - } - testDirName += "TestCreateTarFile_Dir" + System.currentTimeMillis(); - tempSnapshotDir = new File(testDirName); - tempSnapshotDir.mkdirs(); - + String testDirName = folder.newFolder().getAbsolutePath(); File file = new File(testDirName + "/temp1.txt"); FileWriter writer = new FileWriter(file); writer.write("Test data 1"); @@ -122,14 +121,60 @@ public void testCreateTarFile() throws Exception { writer.write("Test data 2"); writer.close(); - tarFile = OmUtils.createTarFile(Paths.get(testDirName)); - Assert.assertNotNull(tarFile); - + File outputFile = + new File(Paths.get(testDirName, "output_file.tgz").toString()); + TestDBCheckpoint dbCheckpoint = new TestDBCheckpoint( + Paths.get(testDirName)); + OmUtils.writeOmDBCheckpointToStream(dbCheckpoint, + new FileOutputStream(outputFile)); + assertNotNull(outputFile); } finally { IOUtils.closeStream(fis); IOUtils.closeStream(fos); - FileUtils.deleteDirectory(tempSnapshotDir); - FileUtils.deleteQuietly(tarFile); } } + +} + +class TestDBCheckpoint implements DBCheckpoint { + + private Path checkpointFile; + + TestDBCheckpoint(Path checkpointFile) { + this.checkpointFile = checkpointFile; + } + + @Override + public Path getCheckpointLocation() { + return checkpointFile; + } + + @Override + public long getCheckpointTimestamp() { + return 0; + } + + @Override + public long getLatestSequenceNumber() { + return 0; + } + + @Override + public long checkpointCreationTimeTaken() { + return 0; + } + + @Override + public void cleanupCheckpoint() throws IOException { + FileUtils.deleteDirectory(checkpointFile.toFile()); + } + + @Override + public void setRatisSnapshotIndex(long omRatisSnapshotIndex) { + } + + @Override + public long getRatisSnapshotIndex() { + return 0; + } } diff --git a/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh b/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh index df193307d2f6..81551d1ed977 100755 --- a/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh +++ b/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh @@ -45,6 +45,11 @@ grep -A1 'Crashed tests' "${REPORT_DIR}/output.log" \ | cut -f2- -d' ' \ | sort -u >> "${REPORT_DIR}/summary.txt" +## Check if Maven was killed +if grep -q 'Killed.* mvn .* test ' "${REPORT_DIR}/output.log"; then + echo 'Maven test run was 
killed' >> "${REPORT_DIR}/summary.txt" +fi + #Collect of all of the report failes of FAILED tests while IFS= read -r -d '' dir; do while IFS=$'\n' read -r file; do diff --git a/hadoop-ozone/dev-support/checks/blockade.sh b/hadoop-ozone/dev-support/checks/blockade.sh index f8b25c176ac4..a48d2b592ba2 100755 --- a/hadoop-ozone/dev-support/checks/blockade.sh +++ b/hadoop-ozone/dev-support/checks/blockade.sh @@ -21,7 +21,7 @@ OZONE_VERSION=$(grep "" "$DIR/../../pom.xml" | sed 's/<[^>]*>//g' cd "$DIR/../../dist/target/ozone-$OZONE_VERSION/tests" || exit 1 source ${DIR}/../../dist/target/ozone-${OZONE_VERSION}/compose/ozoneblockade/.env -export HADOOP_RUNNER_VERSION +export OZONE_RUNNER_VERSION export HDDS_VERSION python -m pytest -s blockade diff --git a/hadoop-ozone/dev-support/checks/build.sh b/hadoop-ozone/dev-support/checks/build.sh index 11973301b27a..f461da1456fa 100755 --- a/hadoop-ozone/dev-support/checks/build.sh +++ b/hadoop-ozone/dev-support/checks/build.sh @@ -17,5 +17,5 @@ DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" cd "$DIR/../../.." || exit 1 export MAVEN_OPTS="-Xmx4096m" -mvn -B -f pom.ozone.xml -Dmaven.javadoc.skip=true -DskipTests clean install +mvn -B -Dmaven.javadoc.skip=true -DskipTests clean install exit $? diff --git a/hadoop-ozone/dev-support/checks/checkstyle.sh b/hadoop-ozone/dev-support/checks/checkstyle.sh index 685bf14629e0..bdae1d6ce612 100755 --- a/hadoop-ozone/dev-support/checks/checkstyle.sh +++ b/hadoop-ozone/dev-support/checks/checkstyle.sh @@ -21,7 +21,7 @@ REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/checkstyle"} mkdir -p "$REPORT_DIR" REPORT_FILE="$REPORT_DIR/summary.txt" -mvn -B -fn checkstyle:check -f pom.ozone.xml +mvn -B -fn checkstyle:check #Print out the exact violations with parsing XML results with sed find "." -name checkstyle-errors.xml -print0 \ diff --git a/hadoop-ozone/dev-support/checks/findbugs.sh b/hadoop-ozone/dev-support/checks/findbugs.sh index ccbf2ed678cc..980968480721 100755 --- a/hadoop-ozone/dev-support/checks/findbugs.sh +++ b/hadoop-ozone/dev-support/checks/findbugs.sh @@ -17,11 +17,11 @@ DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" cd "$DIR/../../.." || exit 1 if ! type unionBugs >/dev/null 2>&1 || ! type convertXmlToText >/dev/null 2>&1; then - mvn -B -fae compile spotbugs:check -f pom.ozone.xml + mvn -B -fae compile spotbugs:check exit $? fi -mvn -B -fae compile spotbugs:spotbugs -f pom.ozone.xml +mvn -B -fae compile spotbugs:spotbugs REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/findbugs"} mkdir -p "$REPORT_DIR" diff --git a/hadoop-ozone/dev-support/checks/integration.sh b/hadoop-ozone/dev-support/checks/integration.sh index ccd499d43219..f522224f67af 100755 --- a/hadoop-ozone/dev-support/checks/integration.sh +++ b/hadoop-ozone/dev-support/checks/integration.sh @@ -17,9 +17,9 @@ DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" cd "$DIR/../../.." 
|| exit 1 export MAVEN_OPTS="-Xmx4096m" -mvn -B install -f pom.ozone.xml -DskipTests -mvn -B -fn test -f pom.ozone.xml -pl :hadoop-ozone-integration-test,:hadoop-ozone-filesystem,:hadoop-ozone-tools \ - -Dtest=\!TestMiniChaosOzoneCluster +mvn -B install -DskipTests +mvn -B -fn test -pl :hadoop-ozone-integration-test,:hadoop-ozone-filesystem,:hadoop-ozone-tools \ + -Dtest=\!TestMiniChaosOzoneCluster "$@" REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/integration"} mkdir -p "$REPORT_DIR" diff --git a/hadoop-ozone/dev-support/checks/unit.sh b/hadoop-ozone/dev-support/checks/unit.sh index 9429026bd7ac..44b6d0bc4e74 100755 --- a/hadoop-ozone/dev-support/checks/unit.sh +++ b/hadoop-ozone/dev-support/checks/unit.sh @@ -17,7 +17,7 @@ DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" cd "$DIR/../../.." || exit 1 export MAVEN_OPTS="-Xmx4096m" -mvn -B -fn test -f pom.ozone.xml -pl \!:hadoop-ozone-integration-test,\!:hadoop-ozone-filesystem,\!:hadoop-ozone-tools +mvn -B -fn test -pl \!:hadoop-ozone-integration-test,\!:hadoop-ozone-filesystem,\!:hadoop-ozone-tools "$@" REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/unit"} mkdir -p "$REPORT_DIR" diff --git a/hadoop-ozone/dist/src/main/assemblies/ozone-src.xml b/hadoop-ozone/dist/src/main/assemblies/ozone-src.xml index 25e35c8a368a..a9060d23d2d3 100644 --- a/hadoop-ozone/dist/src/main/assemblies/ozone-src.xml +++ b/hadoop-ozone/dist/src/main/assemblies/ozone-src.xml @@ -24,11 +24,6 @@ true - - pom.ozone.xml - / - pom.xml - hadoop-ozone/dist/src/main/license/src/LICENSE.txt / @@ -62,7 +57,7 @@ . - pom.ozone.xml + pom.xml README.txt diff --git a/hadoop-ozone/dist/src/main/compose/ozone-hdfs/.env b/hadoop-ozone/dist/src/main/compose/ozone-hdfs/.env index 8916fc3b7521..df9065c5ff47 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-hdfs/.env +++ b/hadoop-ozone/dist/src/main/compose/ozone-hdfs/.env @@ -15,4 +15,4 @@ # limitations under the License. 
HADOOP_VERSION=3 -HADOOP_RUNNER_VERSION=${docker.ozone-runner.version} \ No newline at end of file +OZONE_RUNNER_VERSION=${docker.ozone-runner.version} diff --git a/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-compose.yaml index cd066355064f..7d8295d8817e 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-compose.yaml @@ -37,7 +37,7 @@ services: env_file: - ./docker-config om: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: @@ -48,7 +48,7 @@ services: - ./docker-config command: ["ozone","om"] scm: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: @@ -59,7 +59,7 @@ services: ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION command: ["ozone","scm"] s3g: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: diff --git a/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-config index 3232a105f96e..63bbbd898733 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-config @@ -31,51 +31,5 @@ HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000 HDFS-SITE.XML_rpc.metrics.quantile.enable=true HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 -LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout -LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender -LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n -LOG4J.PROPERTIES_log4j.logger.http.requests.s3gateway=INFO,s3gatewayrequestlog -LOG4J.PROPERTIES_log4j.appender.s3gatewayrequestlog=org.apache.hadoop.http.HttpRequestLogAppender -LOG4J.PROPERTIES_log4j.appender.s3gatewayrequestlog.Filename=/tmp/jetty-s3gateway-yyyy_mm_dd.log -LOG4J.PROPERTIES_log4j.appender.s3gatewayrequestlog.RetainDays=3 - #Enable this variable to print out all hadoop rpc traffic to the stdout. See http://byteman.jboss.org/ to define your own instrumentation. 
#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm - -#LOG4J2.PROPERTIES_* are for Ozone Audit Logging -LOG4J2.PROPERTIES_monitorInterval=30 -LOG4J2.PROPERTIES_filter=read,write -LOG4J2.PROPERTIES_filter.read.type=MarkerFilter -LOG4J2.PROPERTIES_filter.read.marker=READ -LOG4J2.PROPERTIES_filter.read.onMatch=DENY -LOG4J2.PROPERTIES_filter.read.onMismatch=NEUTRAL -LOG4J2.PROPERTIES_filter.write.type=MarkerFilter -LOG4J2.PROPERTIES_filter.write.marker=WRITE -LOG4J2.PROPERTIES_filter.write.onMatch=NEUTRAL -LOG4J2.PROPERTIES_filter.write.onMismatch=NEUTRAL -LOG4J2.PROPERTIES_appenders=console, rolling -LOG4J2.PROPERTIES_appender.console.type=Console -LOG4J2.PROPERTIES_appender.console.name=STDOUT -LOG4J2.PROPERTIES_appender.console.layout.type=PatternLayout -LOG4J2.PROPERTIES_appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n -LOG4J2.PROPERTIES_appender.rolling.type=RollingFile -LOG4J2.PROPERTIES_appender.rolling.name=RollingFile -LOG4J2.PROPERTIES_appender.rolling.fileName=${sys:hadoop.log.dir}/om-audit-${hostName}.log -LOG4J2.PROPERTIES_appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz -LOG4J2.PROPERTIES_appender.rolling.layout.type=PatternLayout -LOG4J2.PROPERTIES_appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n -LOG4J2.PROPERTIES_appender.rolling.policies.type=Policies -LOG4J2.PROPERTIES_appender.rolling.policies.time.type=TimeBasedTriggeringPolicy -LOG4J2.PROPERTIES_appender.rolling.policies.time.interval=86400 -LOG4J2.PROPERTIES_appender.rolling.policies.size.type=SizeBasedTriggeringPolicy -LOG4J2.PROPERTIES_appender.rolling.policies.size.size=64MB -LOG4J2.PROPERTIES_loggers=audit -LOG4J2.PROPERTIES_logger.audit.type=AsyncLogger -LOG4J2.PROPERTIES_logger.audit.name=OMAudit -LOG4J2.PROPERTIES_logger.audit.level=INFO -LOG4J2.PROPERTIES_logger.audit.appenderRefs=rolling -LOG4J2.PROPERTIES_logger.audit.appenderRef.file.ref=RollingFile -LOG4J2.PROPERTIES_rootLogger.level=INFO -LOG4J2.PROPERTIES_rootLogger.appenderRefs=stdout -LOG4J2.PROPERTIES_rootLogger.appenderRef.stdout.ref=STDOUT diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/common-config b/hadoop-ozone/dist/src/main/compose/ozone-mr/common-config index b83f3323fab1..793623883312 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-mr/common-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/common-config @@ -75,12 +75,3 @@ CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.acl_administer_queue CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.node-locality-delay=40 CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.queue-mappings= CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.queue-mappings-override.enable=false - -LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout -LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender -LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n -LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR -LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.conf.ConfUtils=WARN -LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop=INFO -LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/.env b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/.env 
index 1ec33aefa999..27fc57662d66 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/.env +++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/.env @@ -19,4 +19,4 @@ HDDS_VERSION=@hdds.version@ # See: HADOOP-16092 for more details. HADOOP_IMAGE=flokkr/hadoop HADOOP_VERSION=2.7.7 -HADOOP_RUNNER_VERSION=@docker.ozone-runner.version@ +OZONE_RUNNER_VERSION=@docker.ozone-runner.version@ diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-compose.yaml index a23566b8a472..17f5ee535524 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-compose.yaml @@ -17,7 +17,7 @@ version: "3" services: datanode: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} volumes: - ../../..:/opt/hadoop ports: @@ -27,7 +27,7 @@ services: - docker-config - ../common-config om: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} hostname: om volumes: - ../../..:/opt/hadoop @@ -41,7 +41,7 @@ services: - ../common-config command: ["/opt/hadoop/bin/ozone","om"] s3g: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} hostname: s3g volumes: - ../../..:/opt/hadoop @@ -52,7 +52,7 @@ services: - ../common-config command: ["/opt/hadoop/bin/ozone","s3g"] scm: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} hostname: scm volumes: - ../../..:/opt/hadoop diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/.env b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/.env index c6ef057b5188..4cb42717f680 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/.env +++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/.env @@ -19,4 +19,4 @@ HDDS_VERSION=@hdds.version@ # See: HADOOP-16092 for more details. 
HADOOP_IMAGE=flokkr/hadoop HADOOP_VERSION=3.1.2 -HADOOP_RUNNER_VERSION=@docker.ozone-runner.version@ +OZONE_RUNNER_VERSION=@docker.ozone-runner.version@ diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-compose.yaml index af1c96091851..e3696fcf70a7 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-compose.yaml @@ -17,7 +17,7 @@ version: "3" services: datanode: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} volumes: - ../../..:/opt/hadoop ports: @@ -27,7 +27,7 @@ services: - docker-config - ../common-config om: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} hostname: om volumes: - ../../..:/opt/hadoop @@ -41,7 +41,7 @@ services: - ../common-config command: ["/opt/hadoop/bin/ozone","om"] s3g: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} hostname: s3g volumes: - ../../..:/opt/hadoop @@ -52,7 +52,7 @@ services: - ../common-config command: ["/opt/hadoop/bin/ozone","s3g"] scm: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} hostname: scm volumes: - ../../..:/opt/hadoop diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/.env b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/.env index d2c5aad9f98e..70ba4b692d4e 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/.env +++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/.env @@ -17,4 +17,4 @@ HDDS_VERSION=@hdds.version@ HADOOP_IMAGE=apache/hadoop HADOOP_VERSION=3 -HADOOP_RUNNER_VERSION=@docker.ozone-runner.version@ +OZONE_RUNNER_VERSION=@docker.ozone-runner.version@ diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-compose.yaml index 755b279749ca..c25d36cb9043 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-compose.yaml @@ -17,7 +17,7 @@ version: "3" services: datanode: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} volumes: - ../../..:/opt/hadoop ports: @@ -27,7 +27,7 @@ services: - docker-config - ../common-config om: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} hostname: om volumes: - ../../..:/opt/hadoop @@ -41,7 +41,7 @@ services: - ../common-config command: ["/opt/hadoop/bin/ozone","om"] s3g: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} hostname: s3g volumes: - ../../..:/opt/hadoop @@ -52,7 +52,7 @@ services: - ../common-config command: ["/opt/hadoop/bin/ozone","s3g"] scm: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} hostname: scm volumes: - ../../..:/opt/hadoop diff --git a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/.env b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/.env index 8753b1dc9bb0..96ab163b4747 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/.env +++ b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/.env @@ -15,4 +15,4 @@ # limitations under the License. 
HDDS_VERSION=${hdds.version} -HADOOP_RUNNER_VERSION=${docker.ozone-runner.version} \ No newline at end of file +OZONE_RUNNER_VERSION=${docker.ozone-runner.version} diff --git a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-compose.yaml index 116419e78a0e..2cd2ce80c16a 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-compose.yaml @@ -17,7 +17,7 @@ version: "3" services: datanode: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} privileged: true #required by the profiler volumes: - ../..:/opt/hadoop @@ -27,7 +27,7 @@ services: env_file: - ./docker-config om1: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} privileged: true #required by the profiler volumes: - ../..:/opt/hadoop @@ -40,7 +40,7 @@ services: - ./docker-config command: ["/opt/hadoop/bin/ozone","om"] om2: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} privileged: true #required by the profiler volumes: - ../..:/opt/hadoop @@ -53,7 +53,7 @@ services: - ./docker-config command: ["/opt/hadoop/bin/ozone","om"] om3: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} privileged: true #required by the profiler volumes: - ../..:/opt/hadoop @@ -66,7 +66,7 @@ services: - ./docker-config command: ["/opt/hadoop/bin/ozone","om"] scm: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} privileged: true #required by the profiler volumes: - ../..:/opt/hadoop diff --git a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config index 5c3b2a2c2db1..f3de99a50a79 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config @@ -35,51 +35,6 @@ OZONE-SITE.XML_hdds.profiler.endpoint.enabled=true HDFS-SITE.XML_rpc.metrics.quantile.enable=true HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 ASYNC_PROFILER_HOME=/opt/profiler -LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout -LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender -LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n -LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR -LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.conf.ConfUtils=WARN -LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR -LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.grpc.client.GrpcClientProtocolClient=WARN #Enable this variable to print out all hadoop rpc traffic to the stdout. See http://byteman.jboss.org/ to define your own instrumentation. 
#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm - -#LOG4J2.PROPERTIES_* are for Ozone Audit Logging -LOG4J2.PROPERTIES_monitorInterval=30 -LOG4J2.PROPERTIES_filter=read,write -LOG4J2.PROPERTIES_filter.read.type=MarkerFilter -LOG4J2.PROPERTIES_filter.read.marker=READ -LOG4J2.PROPERTIES_filter.read.onMatch=DENY -LOG4J2.PROPERTIES_filter.read.onMismatch=NEUTRAL -LOG4J2.PROPERTIES_filter.write.type=MarkerFilter -LOG4J2.PROPERTIES_filter.write.marker=WRITE -LOG4J2.PROPERTIES_filter.write.onMatch=NEUTRAL -LOG4J2.PROPERTIES_filter.write.onMismatch=NEUTRAL -LOG4J2.PROPERTIES_appenders=console, rolling -LOG4J2.PROPERTIES_appender.console.type=Console -LOG4J2.PROPERTIES_appender.console.name=STDOUT -LOG4J2.PROPERTIES_appender.console.layout.type=PatternLayout -LOG4J2.PROPERTIES_appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n -LOG4J2.PROPERTIES_appender.rolling.type=RollingFile -LOG4J2.PROPERTIES_appender.rolling.name=RollingFile -LOG4J2.PROPERTIES_appender.rolling.fileName=${sys:hadoop.log.dir}/om-audit-${hostName}.log -LOG4J2.PROPERTIES_appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz -LOG4J2.PROPERTIES_appender.rolling.layout.type=PatternLayout -LOG4J2.PROPERTIES_appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n -LOG4J2.PROPERTIES_appender.rolling.policies.type=Policies -LOG4J2.PROPERTIES_appender.rolling.policies.time.type=TimeBasedTriggeringPolicy -LOG4J2.PROPERTIES_appender.rolling.policies.time.interval=86400 -LOG4J2.PROPERTIES_appender.rolling.policies.size.type=SizeBasedTriggeringPolicy -LOG4J2.PROPERTIES_appender.rolling.policies.size.size=64MB -LOG4J2.PROPERTIES_loggers=audit -LOG4J2.PROPERTIES_logger.audit.type=AsyncLogger -LOG4J2.PROPERTIES_logger.audit.name=OMAudit -LOG4J2.PROPERTIES_logger.audit.level=INFO -LOG4J2.PROPERTIES_logger.audit.appenderRefs=rolling -LOG4J2.PROPERTIES_logger.audit.appenderRef.file.ref=RollingFile -LOG4J2.PROPERTIES_rootLogger.level=INFO -LOG4J2.PROPERTIES_rootLogger.appenderRefs=stdout -LOG4J2.PROPERTIES_rootLogger.appenderRef.stdout.ref=STDOUT diff --git a/hadoop-ozone/dist/src/main/compose/ozone-recon/.env b/hadoop-ozone/dist/src/main/compose/ozone-recon/.env index 8753b1dc9bb0..96ab163b4747 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-recon/.env +++ b/hadoop-ozone/dist/src/main/compose/ozone-recon/.env @@ -15,4 +15,4 @@ # limitations under the License. 
HDDS_VERSION=${hdds.version} -HADOOP_RUNNER_VERSION=${docker.ozone-runner.version} \ No newline at end of file +OZONE_RUNNER_VERSION=${docker.ozone-runner.version} diff --git a/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-compose.yaml index 4cec246b4ffc..38e2ef330940 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-compose.yaml @@ -17,7 +17,7 @@ version: "3" services: datanode: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} privileged: true #required by the profiler volumes: - ../..:/opt/hadoop @@ -28,7 +28,7 @@ services: env_file: - ./docker-config om: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} privileged: true #required by the profiler volumes: - ../..:/opt/hadoop @@ -40,7 +40,7 @@ services: - ./docker-config command: ["/opt/hadoop/bin/ozone","om"] scm: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} privileged: true #required by the profiler volumes: - ../..:/opt/hadoop @@ -52,7 +52,7 @@ services: ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION command: ["/opt/hadoop/bin/ozone","scm"] recon: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} privileged: true #required by the profiler volumes: - ../..:/opt/hadoop diff --git a/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-config index e45353b78601..61d1378cded3 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-config @@ -31,51 +31,6 @@ OZONE-SITE.XML_hdds.profiler.endpoint.enabled=true HDFS-SITE.XML_rpc.metrics.quantile.enable=true HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 ASYNC_PROFILER_HOME=/opt/profiler -LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout -LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender -LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n -LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR -LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.conf.ConfUtils=WARN -LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR -LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.grpc.client.GrpcClientProtocolClient=WARN #Enable this variable to print out all hadoop rpc traffic to the stdout. See http://byteman.jboss.org/ to define your own instrumentation. 
-#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm - -#LOG4J2.PROPERTIES_* are for Ozone Audit Logging -LOG4J2.PROPERTIES_monitorInterval=30 -LOG4J2.PROPERTIES_filter=read,write -LOG4J2.PROPERTIES_filter.read.type=MarkerFilter -LOG4J2.PROPERTIES_filter.read.marker=READ -LOG4J2.PROPERTIES_filter.read.onMatch=DENY -LOG4J2.PROPERTIES_filter.read.onMismatch=NEUTRAL -LOG4J2.PROPERTIES_filter.write.type=MarkerFilter -LOG4J2.PROPERTIES_filter.write.marker=WRITE -LOG4J2.PROPERTIES_filter.write.onMatch=NEUTRAL -LOG4J2.PROPERTIES_filter.write.onMismatch=NEUTRAL -LOG4J2.PROPERTIES_appenders=console, rolling -LOG4J2.PROPERTIES_appender.console.type=Console -LOG4J2.PROPERTIES_appender.console.name=STDOUT -LOG4J2.PROPERTIES_appender.console.layout.type=PatternLayout -LOG4J2.PROPERTIES_appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n -LOG4J2.PROPERTIES_appender.rolling.type=RollingFile -LOG4J2.PROPERTIES_appender.rolling.name=RollingFile -LOG4J2.PROPERTIES_appender.rolling.fileName=${sys:hadoop.log.dir}/om-audit-${hostName}.log -LOG4J2.PROPERTIES_appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz -LOG4J2.PROPERTIES_appender.rolling.layout.type=PatternLayout -LOG4J2.PROPERTIES_appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n -LOG4J2.PROPERTIES_appender.rolling.policies.type=Policies -LOG4J2.PROPERTIES_appender.rolling.policies.time.type=TimeBasedTriggeringPolicy -LOG4J2.PROPERTIES_appender.rolling.policies.time.interval=86400 -LOG4J2.PROPERTIES_appender.rolling.policies.size.type=SizeBasedTriggeringPolicy -LOG4J2.PROPERTIES_appender.rolling.policies.size.size=64MB -LOG4J2.PROPERTIES_loggers=audit -LOG4J2.PROPERTIES_logger.audit.type=AsyncLogger -LOG4J2.PROPERTIES_logger.audit.name=OMAudit -LOG4J2.PROPERTIES_logger.audit.level=INFO -LOG4J2.PROPERTIES_logger.audit.appenderRefs=rolling -LOG4J2.PROPERTIES_logger.audit.appenderRef.file.ref=RollingFile -LOG4J2.PROPERTIES_rootLogger.level=INFO -LOG4J2.PROPERTIES_rootLogger.appenderRefs=stdout -LOG4J2.PROPERTIES_rootLogger.appenderRef.stdout.ref=STDOUT +#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/compose/ozone-topology/.env b/hadoop-ozone/dist/src/main/compose/ozone-topology/.env index 19a73160ab89..249827bc91a9 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-topology/.env +++ b/hadoop-ozone/dist/src/main/compose/ozone-topology/.env @@ -15,4 +15,4 @@ # limitations under the License. 
HDDS_VERSION=0.5.0-SNAPSHOT -HADOOP_RUNNER_VERSION=${docker.ozone-runner.version} \ No newline at end of file +OZONE_RUNNER_VERSION=${docker.ozone-runner.version} diff --git a/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-compose.yaml index 7b99a7b21f09..a66eff617edb 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-compose.yaml @@ -17,7 +17,7 @@ version: "3" services: datanode_1: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} privileged: true #required by the profiler volumes: - ../..:/opt/hadoop @@ -31,7 +31,7 @@ services: net: ipv4_address: 10.5.0.4 datanode_2: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} privileged: true #required by the profiler volumes: - ../..:/opt/hadoop @@ -45,7 +45,7 @@ services: net: ipv4_address: 10.5.0.5 datanode_3: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} privileged: true #required by the profiler volumes: - ../..:/opt/hadoop @@ -59,7 +59,7 @@ services: net: ipv4_address: 10.5.0.6 datanode_4: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} privileged: true #required by the profiler volumes: - ../..:/opt/hadoop @@ -73,7 +73,7 @@ services: net: ipv4_address: 10.5.0.7 om: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} privileged: true #required by the profiler volumes: - ../..:/opt/hadoop @@ -88,7 +88,7 @@ services: net: ipv4_address: 10.5.0.70 scm: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} privileged: true #required by the profiler volumes: - ../..:/opt/hadoop diff --git a/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-config index cfbdfae26cb8..ac6a3679de3a 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-config @@ -33,18 +33,6 @@ OZONE-SITE.XML_dfs.network.topology.aware.read.enable=true HDFS-SITE.XML_rpc.metrics.quantile.enable=true HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 ASYNC_PROFILER_HOME=/opt/profiler -LOG4J.PROPERTIES_log4j.rootLogger=DEBUG, ARF -LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender -LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n -LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR -LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.conf.ConfUtils=WARN -LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR -LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.grpc.client.GrpcClientProtocolClient=WARN -LOG4J.PROPERTIES_log4j.appender.ARF=org.apache.log4j.RollingFileAppender -LOG4J.PROPERTIES_log4j.appender.ARF.layout=org.apache.log4j.PatternLayout -LOG4J.PROPERTIES_log4j.appender.ARF.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n -LOG4J.PROPERTIES_log4j.appender.ARF.file=/opt/hadoop/logs/${module.name}-${user.name}.log HDDS_DN_OPTS=-Dmodule.name=datanode HDFS_OM_OPTS=-Dmodule.name=om 
HDFS_STORAGECONTAINERMANAGER_OPTS=-Dmodule.name=scm @@ -53,40 +41,3 @@ HDFS_SCM_CLI_OPTS=-Dmodule.name=scmcli #Enable this variable to print out all hadoop rpc traffic to the stdout. See http://byteman.jboss.org/ to define your own instrumentation. #BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm - -#LOG4J2.PROPERTIES_* are for Ozone Audit Logging -LOG4J2.PROPERTIES_monitorInterval=30 -LOG4J2.PROPERTIES_filter=read,write -LOG4J2.PROPERTIES_filter.read.type=MarkerFilter -LOG4J2.PROPERTIES_filter.read.marker=READ -LOG4J2.PROPERTIES_filter.read.onMatch=DENY -LOG4J2.PROPERTIES_filter.read.onMismatch=NEUTRAL -LOG4J2.PROPERTIES_filter.write.type=MarkerFilter -LOG4J2.PROPERTIES_filter.write.marker=WRITE -LOG4J2.PROPERTIES_filter.write.onMatch=NEUTRAL -LOG4J2.PROPERTIES_filter.write.onMismatch=NEUTRAL -LOG4J2.PROPERTIES_appenders=console, rolling -LOG4J2.PROPERTIES_appender.console.type=Console -LOG4J2.PROPERTIES_appender.console.name=STDOUT -LOG4J2.PROPERTIES_appender.console.layout.type=PatternLayout -LOG4J2.PROPERTIES_appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n -LOG4J2.PROPERTIES_appender.rolling.type=RollingFile -LOG4J2.PROPERTIES_appender.rolling.name=RollingFile -LOG4J2.PROPERTIES_appender.rolling.fileName=${sys:hadoop.log.dir}/om-audit-${hostName}.log -LOG4J2.PROPERTIES_appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz -LOG4J2.PROPERTIES_appender.rolling.layout.type=PatternLayout -LOG4J2.PROPERTIES_appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n -LOG4J2.PROPERTIES_appender.rolling.policies.type=Policies -LOG4J2.PROPERTIES_appender.rolling.policies.time.type=TimeBasedTriggeringPolicy -LOG4J2.PROPERTIES_appender.rolling.policies.time.interval=86400 -LOG4J2.PROPERTIES_appender.rolling.policies.size.type=SizeBasedTriggeringPolicy -LOG4J2.PROPERTIES_appender.rolling.policies.size.size=64MB -LOG4J2.PROPERTIES_loggers=audit -LOG4J2.PROPERTIES_logger.audit.type=AsyncLogger -LOG4J2.PROPERTIES_logger.audit.name=OMAudit -LOG4J2.PROPERTIES_logger.audit.level=INFO -LOG4J2.PROPERTIES_logger.audit.appenderRefs=rolling -LOG4J2.PROPERTIES_logger.audit.appenderRef.file.ref=RollingFile -LOG4J2.PROPERTIES_rootLogger.level=INFO -LOG4J2.PROPERTIES_rootLogger.appenderRefs=stdout -LOG4J2.PROPERTIES_rootLogger.appenderRef.stdout.ref=STDOUT diff --git a/hadoop-ozone/dist/src/main/compose/ozone/.env b/hadoop-ozone/dist/src/main/compose/ozone/.env index 8753b1dc9bb0..96ab163b4747 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone/.env +++ b/hadoop-ozone/dist/src/main/compose/ozone/.env @@ -15,4 +15,4 @@ # limitations under the License. 
HDDS_VERSION=${hdds.version} -HADOOP_RUNNER_VERSION=${docker.ozone-runner.version} \ No newline at end of file +OZONE_RUNNER_VERSION=${docker.ozone-runner.version} diff --git a/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml index 6bfe36c7d7c5..145ce3ebb176 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml @@ -17,7 +17,7 @@ version: "3" services: datanode: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} privileged: true #required by the profiler volumes: - ../..:/opt/hadoop @@ -28,7 +28,7 @@ services: env_file: - ./docker-config om: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} privileged: true #required by the profiler volumes: - ../..:/opt/hadoop @@ -40,7 +40,7 @@ services: - ./docker-config command: ["/opt/hadoop/bin/ozone","om"] scm: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} privileged: true #required by the profiler volumes: - ../..:/opt/hadoop diff --git a/hadoop-ozone/dist/src/main/compose/ozone/docker-config b/hadoop-ozone/dist/src/main/compose/ozone/docker-config index c7a1647774f3..380b529cd33a 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone/docker-config @@ -29,51 +29,6 @@ OZONE-SITE.XML_hdds.profiler.endpoint.enabled=true HDFS-SITE.XML_rpc.metrics.quantile.enable=true HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 ASYNC_PROFILER_HOME=/opt/profiler -LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout -LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender -LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n -LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR -LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.conf.ConfUtils=WARN -LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR -LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.grpc.client.GrpcClientProtocolClient=WARN #Enable this variable to print out all hadoop rpc traffic to the stdout. See http://byteman.jboss.org/ to define your own instrumentation. 
#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm - -#LOG4J2.PROPERTIES_* are for Ozone Audit Logging -LOG4J2.PROPERTIES_monitorInterval=30 -LOG4J2.PROPERTIES_filter=read,write -LOG4J2.PROPERTIES_filter.read.type=MarkerFilter -LOG4J2.PROPERTIES_filter.read.marker=READ -LOG4J2.PROPERTIES_filter.read.onMatch=DENY -LOG4J2.PROPERTIES_filter.read.onMismatch=NEUTRAL -LOG4J2.PROPERTIES_filter.write.type=MarkerFilter -LOG4J2.PROPERTIES_filter.write.marker=WRITE -LOG4J2.PROPERTIES_filter.write.onMatch=NEUTRAL -LOG4J2.PROPERTIES_filter.write.onMismatch=NEUTRAL -LOG4J2.PROPERTIES_appenders=console, rolling -LOG4J2.PROPERTIES_appender.console.type=Console -LOG4J2.PROPERTIES_appender.console.name=STDOUT -LOG4J2.PROPERTIES_appender.console.layout.type=PatternLayout -LOG4J2.PROPERTIES_appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n -LOG4J2.PROPERTIES_appender.rolling.type=RollingFile -LOG4J2.PROPERTIES_appender.rolling.name=RollingFile -LOG4J2.PROPERTIES_appender.rolling.fileName=${sys:hadoop.log.dir}/om-audit-${hostName}.log -LOG4J2.PROPERTIES_appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz -LOG4J2.PROPERTIES_appender.rolling.layout.type=PatternLayout -LOG4J2.PROPERTIES_appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n -LOG4J2.PROPERTIES_appender.rolling.policies.type=Policies -LOG4J2.PROPERTIES_appender.rolling.policies.time.type=TimeBasedTriggeringPolicy -LOG4J2.PROPERTIES_appender.rolling.policies.time.interval=86400 -LOG4J2.PROPERTIES_appender.rolling.policies.size.type=SizeBasedTriggeringPolicy -LOG4J2.PROPERTIES_appender.rolling.policies.size.size=64MB -LOG4J2.PROPERTIES_loggers=audit -LOG4J2.PROPERTIES_logger.audit.type=AsyncLogger -LOG4J2.PROPERTIES_logger.audit.name=OMAudit -LOG4J2.PROPERTIES_logger.audit.level=INFO -LOG4J2.PROPERTIES_logger.audit.appenderRefs=rolling -LOG4J2.PROPERTIES_logger.audit.appenderRef.file.ref=RollingFile -LOG4J2.PROPERTIES_rootLogger.level=INFO -LOG4J2.PROPERTIES_rootLogger.appenderRefs=stdout -LOG4J2.PROPERTIES_rootLogger.appenderRef.stdout.ref=STDOUT diff --git a/hadoop-ozone/dist/src/main/compose/ozone/test.sh b/hadoop-ozone/dist/src/main/compose/ozone/test.sh index fbae76da434e..e06f817f3d81 100755 --- a/hadoop-ozone/dist/src/main/compose/ozone/test.sh +++ b/hadoop-ozone/dist/src/main/compose/ozone/test.sh @@ -31,6 +31,8 @@ start_docker_env execute_robot_test scm basic/basic.robot +execute_robot_test scm gdpr/gdpr.robot + stop_docker_env generate_report diff --git a/hadoop-ozone/dist/src/main/compose/ozoneblockade/.env b/hadoop-ozone/dist/src/main/compose/ozoneblockade/.env index 8753b1dc9bb0..96ab163b4747 100644 --- a/hadoop-ozone/dist/src/main/compose/ozoneblockade/.env +++ b/hadoop-ozone/dist/src/main/compose/ozoneblockade/.env @@ -15,4 +15,4 @@ # limitations under the License. 
HDDS_VERSION=${hdds.version} -HADOOP_RUNNER_VERSION=${docker.ozone-runner.version} \ No newline at end of file +OZONE_RUNNER_VERSION=${docker.ozone-runner.version} diff --git a/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-compose.yaml index ac548890512b..703329fe144c 100644 --- a/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-compose.yaml @@ -17,7 +17,7 @@ version: "3" services: datanode: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: @@ -26,7 +26,7 @@ services: env_file: - ./docker-config om: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: @@ -37,7 +37,7 @@ services: - ./docker-config command: ["/opt/hadoop/bin/ozone","om"] scm: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: @@ -48,7 +48,7 @@ services: ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION command: ["/opt/hadoop/bin/ozone","scm"] ozone_client: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: diff --git a/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config b/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config index af72465091c6..4d5466c6ab9e 100644 --- a/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config @@ -37,51 +37,6 @@ OZONE-SITE.XML_hdds.scm.replication.event.timeout=10s OZONE-SITE.XML_dfs.ratis.server.failure.duration=35s HDFS-SITE.XML_rpc.metrics.quantile.enable=true HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 -LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout -LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender -LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n -LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR -LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.conf.ConfUtils=WARN -LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR -LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.grpc.client.GrpcClientProtocolClient=WARN #Enable this variable to print out all hadoop rpc traffic to the stdout. See http://byteman.jboss.org/ to define your own instrumentation. 
#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm - -#LOG4J2.PROPERTIES_* are for Ozone Audit Logging -LOG4J2.PROPERTIES_monitorInterval=30 -LOG4J2.PROPERTIES_filter=read,write -LOG4J2.PROPERTIES_filter.read.type=MarkerFilter -LOG4J2.PROPERTIES_filter.read.marker=READ -LOG4J2.PROPERTIES_filter.read.onMatch=DENY -LOG4J2.PROPERTIES_filter.read.onMismatch=NEUTRAL -LOG4J2.PROPERTIES_filter.write.type=MarkerFilter -LOG4J2.PROPERTIES_filter.write.marker=WRITE -LOG4J2.PROPERTIES_filter.write.onMatch=NEUTRAL -LOG4J2.PROPERTIES_filter.write.onMismatch=NEUTRAL -LOG4J2.PROPERTIES_appenders=console, rolling -LOG4J2.PROPERTIES_appender.console.type=Console -LOG4J2.PROPERTIES_appender.console.name=STDOUT -LOG4J2.PROPERTIES_appender.console.layout.type=PatternLayout -LOG4J2.PROPERTIES_appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n -LOG4J2.PROPERTIES_appender.rolling.type=RollingFile -LOG4J2.PROPERTIES_appender.rolling.name=RollingFile -LOG4J2.PROPERTIES_appender.rolling.fileName=${sys:hadoop.log.dir}/om-audit-${hostName}.log -LOG4J2.PROPERTIES_appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz -LOG4J2.PROPERTIES_appender.rolling.layout.type=PatternLayout -LOG4J2.PROPERTIES_appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n -LOG4J2.PROPERTIES_appender.rolling.policies.type=Policies -LOG4J2.PROPERTIES_appender.rolling.policies.time.type=TimeBasedTriggeringPolicy -LOG4J2.PROPERTIES_appender.rolling.policies.time.interval=86400 -LOG4J2.PROPERTIES_appender.rolling.policies.size.type=SizeBasedTriggeringPolicy -LOG4J2.PROPERTIES_appender.rolling.policies.size.size=64MB -LOG4J2.PROPERTIES_loggers=audit -LOG4J2.PROPERTIES_logger.audit.type=AsyncLogger -LOG4J2.PROPERTIES_logger.audit.name=OMAudit -LOG4J2.PROPERTIES_logger.audit.level=INFO -LOG4J2.PROPERTIES_logger.audit.appenderRefs=rolling -LOG4J2.PROPERTIES_logger.audit.appenderRef.file.ref=RollingFile -LOG4J2.PROPERTIES_rootLogger.level=INFO -LOG4J2.PROPERTIES_rootLogger.appenderRefs=stdout -LOG4J2.PROPERTIES_rootLogger.appenderRef.stdout.ref=STDOUT diff --git a/hadoop-ozone/dist/src/main/compose/ozoneperf/.env b/hadoop-ozone/dist/src/main/compose/ozoneperf/.env index 8753b1dc9bb0..96ab163b4747 100644 --- a/hadoop-ozone/dist/src/main/compose/ozoneperf/.env +++ b/hadoop-ozone/dist/src/main/compose/ozoneperf/.env @@ -15,4 +15,4 @@ # limitations under the License. 
HDDS_VERSION=${hdds.version} -HADOOP_RUNNER_VERSION=${docker.ozone-runner.version} \ No newline at end of file +OZONE_RUNNER_VERSION=${docker.ozone-runner.version} diff --git a/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-compose.yaml index a13aeaf37427..fa205407e872 100644 --- a/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-compose.yaml @@ -17,7 +17,7 @@ version: "3" services: datanode: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: @@ -26,7 +26,7 @@ services: env_file: - ./docker-config om: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: @@ -37,7 +37,7 @@ services: - ./docker-config command: ["ozone","om"] scm: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: @@ -61,7 +61,7 @@ services: ports: - 9090:9090 freon: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} volumes: - ../..:/opt/hadoop environment: @@ -79,7 +79,7 @@ services: ports: - 3000:3000 s3g: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: diff --git a/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-config b/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-config index 538376ee9053..d2d345272a1f 100644 --- a/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-config @@ -35,16 +35,3 @@ HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 JAEGER_SAMPLER_PARAM=1 JAEGER_SAMPLER_TYPE=const JAEGER_AGENT_HOST=jaeger - -LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout -LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender -LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n -LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR -LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.conf.ConfUtils=WARN -LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR -LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.grpc.client.GrpcClientProtocolClient=WARN -LOG4J.PROPERTIES_log4j.logger.http.requests.s3gateway=INFO,s3gatewayrequestlog -LOG4J.PROPERTIES_log4j.appender.s3gatewayrequestlog=org.apache.hadoop.http.HttpRequestLogAppender -LOG4J.PROPERTIES_log4j.appender.s3gatewayrequestlog.Filename=/tmp/jetty-s3gateway-yyyy_mm_dd.log -LOG4J.PROPERTIES_log4j.appender.s3gatewayrequestlog.RetainDays=3 diff --git a/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/.env b/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/.env index 8753b1dc9bb0..96ab163b4747 100644 --- a/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/.env +++ b/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/.env @@ -15,4 +15,4 @@ # limitations under the License. 
HDDS_VERSION=${hdds.version} -HADOOP_RUNNER_VERSION=${docker.ozone-runner.version} \ No newline at end of file +OZONE_RUNNER_VERSION=${docker.ozone-runner.version} diff --git a/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/docker-compose.yaml index 829792950e59..78fd996a70ca 100644 --- a/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/docker-compose.yaml @@ -24,7 +24,7 @@ services: ports: - 9878:9878 datanode: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: @@ -33,7 +33,7 @@ services: env_file: - ./docker-config om: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: @@ -44,7 +44,7 @@ services: - ./docker-config command: ["ozone","om"] scm: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: @@ -55,7 +55,7 @@ services: ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION command: ["ozone","scm"] s3g1: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: @@ -64,7 +64,7 @@ services: - ./docker-config command: ["ozone","s3g"] s3g2: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: @@ -73,7 +73,7 @@ services: - ./docker-config command: ["ozone","s3g"] s3g3: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: diff --git a/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/docker-config b/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/docker-config index 4ffe9a6674c7..d3efa2e884fa 100644 --- a/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/docker-config @@ -26,54 +26,6 @@ OZONE-SITE.XML_hdds.datanode.dir=/data/hdds HDFS-SITE.XML_rpc.metrics.quantile.enable=true HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 -LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout -LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender -LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n -LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR -LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.conf.ConfUtils=WARN -LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR -LOG4J.PROPERTIES_log4j.logger.http.requests.s3gateway=INFO,s3gatewayrequestlog -LOG4J.PROPERTIES_log4j.appender.s3gatewayrequestlog=org.apache.hadoop.http.HttpRequestLogAppender -LOG4J.PROPERTIES_log4j.appender.s3gatewayrequestlog.Filename=/tmp/jetty-s3gateway-yyyy_mm_dd.log -LOG4J.PROPERTIES_log4j.appender.s3gatewayrequestlog.RetainDays=3 #Enable this variable to print out all hadoop rpc traffic to the stdout. See http://byteman.jboss.org/ to define your own instrumentation. 
#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm - -#LOG4J2.PROPERTIES_* are for Ozone Audit Logging -LOG4J2.PROPERTIES_monitorInterval=30 -LOG4J2.PROPERTIES_filter=read,write -LOG4J2.PROPERTIES_filter.read.type=MarkerFilter -LOG4J2.PROPERTIES_filter.read.marker=READ -LOG4J2.PROPERTIES_filter.read.onMatch=DENY -LOG4J2.PROPERTIES_filter.read.onMismatch=NEUTRAL -LOG4J2.PROPERTIES_filter.write.type=MarkerFilter -LOG4J2.PROPERTIES_filter.write.marker=WRITE -LOG4J2.PROPERTIES_filter.write.onMatch=NEUTRAL -LOG4J2.PROPERTIES_filter.write.onMismatch=NEUTRAL -LOG4J2.PROPERTIES_appenders=console, rolling -LOG4J2.PROPERTIES_appender.console.type=Console -LOG4J2.PROPERTIES_appender.console.name=STDOUT -LOG4J2.PROPERTIES_appender.console.layout.type=PatternLayout -LOG4J2.PROPERTIES_appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n -LOG4J2.PROPERTIES_appender.rolling.type=RollingFile -LOG4J2.PROPERTIES_appender.rolling.name=RollingFile -LOG4J2.PROPERTIES_appender.rolling.fileName=${sys:hadoop.log.dir}/om-audit-${hostName}.log -LOG4J2.PROPERTIES_appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz -LOG4J2.PROPERTIES_appender.rolling.layout.type=PatternLayout -LOG4J2.PROPERTIES_appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n -LOG4J2.PROPERTIES_appender.rolling.policies.type=Policies -LOG4J2.PROPERTIES_appender.rolling.policies.time.type=TimeBasedTriggeringPolicy -LOG4J2.PROPERTIES_appender.rolling.policies.time.interval=86400 -LOG4J2.PROPERTIES_appender.rolling.policies.size.type=SizeBasedTriggeringPolicy -LOG4J2.PROPERTIES_appender.rolling.policies.size.size=64MB -LOG4J2.PROPERTIES_loggers=audit -LOG4J2.PROPERTIES_logger.audit.type=AsyncLogger -LOG4J2.PROPERTIES_logger.audit.name=OMAudit -LOG4J2.PROPERTIES_logger.audit.level=INFO -LOG4J2.PROPERTIES_logger.audit.appenderRefs=rolling -LOG4J2.PROPERTIES_logger.audit.appenderRef.file.ref=RollingFile -LOG4J2.PROPERTIES_rootLogger.level=INFO -LOG4J2.PROPERTIES_rootLogger.appenderRefs=stdout -LOG4J2.PROPERTIES_rootLogger.appenderRef.stdout.ref=STDOUT diff --git a/hadoop-ozone/dist/src/main/compose/ozones3/.env b/hadoop-ozone/dist/src/main/compose/ozones3/.env index 8753b1dc9bb0..96ab163b4747 100644 --- a/hadoop-ozone/dist/src/main/compose/ozones3/.env +++ b/hadoop-ozone/dist/src/main/compose/ozones3/.env @@ -15,4 +15,4 @@ # limitations under the License. 
HDDS_VERSION=${hdds.version} -HADOOP_RUNNER_VERSION=${docker.ozone-runner.version} \ No newline at end of file +OZONE_RUNNER_VERSION=${docker.ozone-runner.version} diff --git a/hadoop-ozone/dist/src/main/compose/ozones3/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozones3/docker-compose.yaml index f4b8cc2d66f0..cc4bfd2268c6 100644 --- a/hadoop-ozone/dist/src/main/compose/ozones3/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/ozones3/docker-compose.yaml @@ -17,7 +17,7 @@ version: "3" services: datanode: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: @@ -26,7 +26,7 @@ services: env_file: - ./docker-config om: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: @@ -37,7 +37,7 @@ services: - ./docker-config command: ["ozone","om"] scm: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: @@ -48,7 +48,7 @@ services: ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION command: ["ozone","scm"] s3g: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: diff --git a/hadoop-ozone/dist/src/main/compose/ozones3/docker-config b/hadoop-ozone/dist/src/main/compose/ozones3/docker-config index 4ffe9a6674c7..d3efa2e884fa 100644 --- a/hadoop-ozone/dist/src/main/compose/ozones3/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozones3/docker-config @@ -26,54 +26,6 @@ OZONE-SITE.XML_hdds.datanode.dir=/data/hdds HDFS-SITE.XML_rpc.metrics.quantile.enable=true HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 -LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout -LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender -LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n -LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR -LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.conf.ConfUtils=WARN -LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR -LOG4J.PROPERTIES_log4j.logger.http.requests.s3gateway=INFO,s3gatewayrequestlog -LOG4J.PROPERTIES_log4j.appender.s3gatewayrequestlog=org.apache.hadoop.http.HttpRequestLogAppender -LOG4J.PROPERTIES_log4j.appender.s3gatewayrequestlog.Filename=/tmp/jetty-s3gateway-yyyy_mm_dd.log -LOG4J.PROPERTIES_log4j.appender.s3gatewayrequestlog.RetainDays=3 #Enable this variable to print out all hadoop rpc traffic to the stdout. See http://byteman.jboss.org/ to define your own instrumentation. 
#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm - -#LOG4J2.PROPERTIES_* are for Ozone Audit Logging -LOG4J2.PROPERTIES_monitorInterval=30 -LOG4J2.PROPERTIES_filter=read,write -LOG4J2.PROPERTIES_filter.read.type=MarkerFilter -LOG4J2.PROPERTIES_filter.read.marker=READ -LOG4J2.PROPERTIES_filter.read.onMatch=DENY -LOG4J2.PROPERTIES_filter.read.onMismatch=NEUTRAL -LOG4J2.PROPERTIES_filter.write.type=MarkerFilter -LOG4J2.PROPERTIES_filter.write.marker=WRITE -LOG4J2.PROPERTIES_filter.write.onMatch=NEUTRAL -LOG4J2.PROPERTIES_filter.write.onMismatch=NEUTRAL -LOG4J2.PROPERTIES_appenders=console, rolling -LOG4J2.PROPERTIES_appender.console.type=Console -LOG4J2.PROPERTIES_appender.console.name=STDOUT -LOG4J2.PROPERTIES_appender.console.layout.type=PatternLayout -LOG4J2.PROPERTIES_appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n -LOG4J2.PROPERTIES_appender.rolling.type=RollingFile -LOG4J2.PROPERTIES_appender.rolling.name=RollingFile -LOG4J2.PROPERTIES_appender.rolling.fileName=${sys:hadoop.log.dir}/om-audit-${hostName}.log -LOG4J2.PROPERTIES_appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz -LOG4J2.PROPERTIES_appender.rolling.layout.type=PatternLayout -LOG4J2.PROPERTIES_appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n -LOG4J2.PROPERTIES_appender.rolling.policies.type=Policies -LOG4J2.PROPERTIES_appender.rolling.policies.time.type=TimeBasedTriggeringPolicy -LOG4J2.PROPERTIES_appender.rolling.policies.time.interval=86400 -LOG4J2.PROPERTIES_appender.rolling.policies.size.type=SizeBasedTriggeringPolicy -LOG4J2.PROPERTIES_appender.rolling.policies.size.size=64MB -LOG4J2.PROPERTIES_loggers=audit -LOG4J2.PROPERTIES_logger.audit.type=AsyncLogger -LOG4J2.PROPERTIES_logger.audit.name=OMAudit -LOG4J2.PROPERTIES_logger.audit.level=INFO -LOG4J2.PROPERTIES_logger.audit.appenderRefs=rolling -LOG4J2.PROPERTIES_logger.audit.appenderRef.file.ref=RollingFile -LOG4J2.PROPERTIES_rootLogger.level=INFO -LOG4J2.PROPERTIES_rootLogger.appenderRefs=stdout -LOG4J2.PROPERTIES_rootLogger.appenderRef.stdout.ref=STDOUT diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/.env b/hadoop-ozone/dist/src/main/compose/ozonescripts/.env index 8753b1dc9bb0..96ab163b4747 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonescripts/.env +++ b/hadoop-ozone/dist/src/main/compose/ozonescripts/.env @@ -15,4 +15,4 @@ # limitations under the License. 
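The .env and docker-compose changes throughout this patch consistently rename HADOOP_RUNNER_VERSION to OZONE_RUNNER_VERSION, so every service resolves its image as apache/ozone-runner:${OZONE_RUNNER_VERSION}; the blockade test harness further below applies the same rename to its environment check. A small fail-fast sketch of that check, written in Java purely for illustration (the actual check lives in cluster.py):

    public class RunnerVersionCheck {
      public static void main(String[] args) {
        // Mirror the blockade harness: refuse to start if the image tag is unknown.
        String version = System.getenv("OZONE_RUNNER_VERSION");
        if (version == null || version.isEmpty()) {
          System.err.println("OZONE_RUNNER_VERSION is not set.");
          System.exit(1);
        }
        System.out.println("Using image apache/ozone-runner:" + version);
      }
    }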
HDDS_VERSION=${hdds.version} -HADOOP_RUNNER_VERSION=${docker.ozone-runner.version} \ No newline at end of file +OZONE_RUNNER_VERSION=${docker.ozone-runner.version} diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-config b/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-config index 4e67a044b011..fe713e0dde21 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-config @@ -31,9 +31,4 @@ HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000 HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode HDFS-SITE.XML_rpc.metrics.quantile.enable=true HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 -HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService -LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout -LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR, stdout -LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender -LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n +HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/.env b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/.env index c4253faa341e..37227ac42bf6 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/.env +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/.env @@ -16,4 +16,4 @@ HDDS_VERSION=${hdds.version} HADOOP_VERSION=3 -HADOOP_RUNNER_VERSION=${docker.ozone-runner.version} \ No newline at end of file +OZONE_RUNNER_VERSION=${docker.ozone-runner.version} diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-compose.yaml index 468506cad876..53e0142b2b65 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-compose.yaml @@ -37,7 +37,7 @@ services: - ./docker-config command: ["hadoop", "kms"] datanode: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} networks: - ozone volumes: @@ -48,7 +48,7 @@ services: env_file: - docker-config om: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} hostname: om networks: - ozone @@ -62,7 +62,7 @@ services: - docker-config command: ["/opt/hadoop/bin/ozone","om"] s3g: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} hostname: s3g networks: - ozone @@ -74,7 +74,7 @@ services: - ./docker-config command: ["/opt/hadoop/bin/ozone","s3g"] scm: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} hostname: scm networks: - ozone @@ -130,3 +130,6 @@ services: HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar WAIT_FOR: rm:8088 command: ["yarn","timelineserver"] +networks: + ozone: + name: ozone diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config index be9dc1e3b51c..646fd021ce7f 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config @@ -119,55 +119,9 @@ 
CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.node-locality-delay=40 CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.queue-mappings= CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.queue-mappings-override.enable=false -LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout -LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender -LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n -LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR -LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.conf.ConfUtils=WARN -LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop=INFO -LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR - #Enable this variable to print out all hadoop rpc traffic to the stdout. See http://byteman.jboss.org/ to define your own instrumentation. #BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm -#LOG4J2.PROPERTIES_* are for Ozone Audit Logging -LOG4J2.PROPERTIES_monitorInterval=30 -LOG4J2.PROPERTIES_filter=read,write -LOG4J2.PROPERTIES_filter.read.type=MarkerFilter -LOG4J2.PROPERTIES_filter.read.marker=READ -LOG4J2.PROPERTIES_filter.read.onMatch=DENY -LOG4J2.PROPERTIES_filter.read.onMismatch=NEUTRAL -LOG4J2.PROPERTIES_filter.write.type=MarkerFilter -LOG4J2.PROPERTIES_filter.write.marker=WRITE -LOG4J2.PROPERTIES_filter.write.onMatch=NEUTRAL -LOG4J2.PROPERTIES_filter.write.onMismatch=NEUTRAL -LOG4J2.PROPERTIES_appenders=console, rolling -LOG4J2.PROPERTIES_appender.console.type=Console -LOG4J2.PROPERTIES_appender.console.name=STDOUT -LOG4J2.PROPERTIES_appender.console.layout.type=PatternLayout -LOG4J2.PROPERTIES_appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n -LOG4J2.PROPERTIES_appender.rolling.type=RollingFile -LOG4J2.PROPERTIES_appender.rolling.name=RollingFile -LOG4J2.PROPERTIES_appender.rolling.fileName=${sys:hadoop.log.dir}/om-audit-${hostName}.log -LOG4J2.PROPERTIES_appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz -LOG4J2.PROPERTIES_appender.rolling.layout.type=PatternLayout -LOG4J2.PROPERTIES_appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n -LOG4J2.PROPERTIES_appender.rolling.policies.type=Policies -LOG4J2.PROPERTIES_appender.rolling.policies.time.type=TimeBasedTriggeringPolicy -LOG4J2.PROPERTIES_appender.rolling.policies.time.interval=86400 -LOG4J2.PROPERTIES_appender.rolling.policies.size.type=SizeBasedTriggeringPolicy -LOG4J2.PROPERTIES_appender.rolling.policies.size.size=64MB -LOG4J2.PROPERTIES_loggers=audit -LOG4J2.PROPERTIES_logger.audit.type=AsyncLogger -LOG4J2.PROPERTIES_logger.audit.name=OMAudit -LOG4J2.PROPERTIES_logger.audit.level=INFO -LOG4J2.PROPERTIES_logger.audit.appenderRefs=rolling -LOG4J2.PROPERTIES_logger.audit.appenderRef.file.ref=RollingFile -LOG4J2.PROPERTIES_rootLogger.level=INFO -LOG4J2.PROPERTIES_rootLogger.appenderRefs=stdout -LOG4J2.PROPERTIES_rootLogger.appenderRef.stdout.ref=STDOUT - OZONE_DATANODE_SECURE_USER=root KEYTAB_DIR=/etc/security/keytabs KERBEROS_KEYTABS=dn om scm HTTP testuser s3g rm nm yarn jhs hadoop spark diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/.env b/hadoop-ozone/dist/src/main/compose/ozonesecure/.env index c4253faa341e..37227ac42bf6 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure/.env +++ 
b/hadoop-ozone/dist/src/main/compose/ozonesecure/.env @@ -16,4 +16,4 @@ HDDS_VERSION=${hdds.version} HADOOP_VERSION=3 -HADOOP_RUNNER_VERSION=${docker.ozone-runner.version} \ No newline at end of file +OZONE_RUNNER_VERSION=${docker.ozone-runner.version} diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml index d202717f9d7e..de60a411116c 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml @@ -35,7 +35,7 @@ services: command: ["hadoop", "kms"] datanode: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: @@ -44,7 +44,7 @@ services: env_file: - docker-config om: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} hostname: om volumes: - ../..:/opt/hadoop @@ -56,7 +56,7 @@ services: - docker-config command: ["/opt/hadoop/bin/ozone","om"] s3g: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} hostname: s3g volumes: - ../..:/opt/hadoop @@ -66,7 +66,7 @@ services: - ./docker-config command: ["/opt/hadoop/bin/ozone","s3g"] recon: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} hostname: recon volumes: - ../..:/opt/hadoop @@ -78,7 +78,7 @@ services: WAITFOR: om:9874 command: ["/opt/hadoop/bin/ozone","recon"] scm: - image: apache/ozone-runner:${HADOOP_RUNNER_VERSION} + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} hostname: scm volumes: - ../..:/opt/hadoop diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config index 60d1fcf6ebe3..44af35ee85d7 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config @@ -65,14 +65,6 @@ CORE-SITE.XML_hadoop.http.authentication.kerberos.principal=HTTP/_HOST@EXAMPLE.C CORE-SITE.XML_hadoop.http.authentication.kerberos.keytab=/etc/security/keytabs/HTTP.keytab CORE-SITE.XML_hadoop.http.filter.initializers=org.apache.hadoop.security.AuthenticationFilterInitializer -LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.security.authentication.server -.AuthenticationFilter=DEBUG -LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.security.authentication.server -.KerberosAuthenticationHandler=TRACE -LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.http.HttpServer2=TRACE - - - CORE-SITE.XML_hadoop.security.authorization=true HADOOP-POLICY.XML_ozone.om.security.client.protocol.acl=* HADOOP-POLICY.XML_hdds.security.client.datanode.container.protocol.acl=* @@ -82,55 +74,10 @@ HADOOP-POLICY.XML_hdds.security.client.scm.certificate.protocol.acl=* HDFS-SITE.XML_rpc.metrics.quantile.enable=true HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 -LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout -LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender -LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n -LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR -LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.conf.ConfUtils=WARN -LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop=INFO 
-LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR #Enable this variable to print out all hadoop rpc traffic to the stdout. See http://byteman.jboss.org/ to define your own instrumentation. #BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm -#LOG4J2.PROPERTIES_* are for Ozone Audit Logging -LOG4J2.PROPERTIES_monitorInterval=30 -LOG4J2.PROPERTIES_filter=read,write -LOG4J2.PROPERTIES_filter.read.type=MarkerFilter -LOG4J2.PROPERTIES_filter.read.marker=READ -LOG4J2.PROPERTIES_filter.read.onMatch=DENY -LOG4J2.PROPERTIES_filter.read.onMismatch=NEUTRAL -LOG4J2.PROPERTIES_filter.write.type=MarkerFilter -LOG4J2.PROPERTIES_filter.write.marker=WRITE -LOG4J2.PROPERTIES_filter.write.onMatch=NEUTRAL -LOG4J2.PROPERTIES_filter.write.onMismatch=NEUTRAL -LOG4J2.PROPERTIES_appenders=console, rolling -LOG4J2.PROPERTIES_appender.console.type=Console -LOG4J2.PROPERTIES_appender.console.name=STDOUT -LOG4J2.PROPERTIES_appender.console.layout.type=PatternLayout -LOG4J2.PROPERTIES_appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n -LOG4J2.PROPERTIES_appender.rolling.type=RollingFile -LOG4J2.PROPERTIES_appender.rolling.name=RollingFile -LOG4J2.PROPERTIES_appender.rolling.fileName=${sys:hadoop.log.dir}/om-audit-${hostName}.log -LOG4J2.PROPERTIES_appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz -LOG4J2.PROPERTIES_appender.rolling.layout.type=PatternLayout -LOG4J2.PROPERTIES_appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n -LOG4J2.PROPERTIES_appender.rolling.policies.type=Policies -LOG4J2.PROPERTIES_appender.rolling.policies.time.type=TimeBasedTriggeringPolicy -LOG4J2.PROPERTIES_appender.rolling.policies.time.interval=86400 -LOG4J2.PROPERTIES_appender.rolling.policies.size.type=SizeBasedTriggeringPolicy -LOG4J2.PROPERTIES_appender.rolling.policies.size.size=64MB -LOG4J2.PROPERTIES_loggers=audit -LOG4J2.PROPERTIES_logger.audit.type=AsyncLogger -LOG4J2.PROPERTIES_logger.audit.name=OMAudit -LOG4J2.PROPERTIES_logger.audit.level=INFO -LOG4J2.PROPERTIES_logger.audit.appenderRefs=rolling -LOG4J2.PROPERTIES_logger.audit.appenderRef.file.ref=RollingFile -LOG4J2.PROPERTIES_rootLogger.level=INFO -LOG4J2.PROPERTIES_rootLogger.appenderRefs=stdout -LOG4J2.PROPERTIES_rootLogger.appenderRef.stdout.ref=STDOUT - OZONE_DATANODE_SECURE_USER=root SECURITY_ENABLED=true KEYTAB_DIR=/etc/security/keytabs diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh index 01106b861545..f32846386a9f 100755 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh @@ -35,6 +35,8 @@ execute_robot_test scm ozonefs/ozonefs.robot execute_robot_test s3g s3 +execute_robot_test scm scmcli + stop_docker_env generate_report diff --git a/hadoop-ozone/dist/src/main/dockerbin/entrypoint.sh b/hadoop-ozone/dist/src/main/dockerbin/entrypoint.sh index f90942eeef89..cb5f016c3103 100755 --- a/hadoop-ozone/dist/src/main/dockerbin/entrypoint.sh +++ b/hadoop-ozone/dist/src/main/dockerbin/entrypoint.sh @@ -63,7 +63,7 @@ if [ -n "$KERBEROS_ENABLED" ]; then echo "KDC ISSUER_SERVER => $ISSUER_SERVER" if [ -n "$SLEEP_SECONDS" ]; then - echo "Sleeping for $(SLEEP_SECONDS) seconds" + echo "Sleeping for ${SLEEP_SECONDS} seconds" sleep "$SLEEP_SECONDS" fi diff --git 
a/hadoop-ozone/dist/src/main/smoketest/scmcli/pipeline.robot b/hadoop-ozone/dist/src/main/smoketest/scmcli/pipeline.robot new file mode 100644 index 000000000000..6a6f0b0eb782 --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/scmcli/pipeline.robot @@ -0,0 +1,28 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +*** Settings *** +Documentation Smoketest ozone cluster startup +Library OperatingSystem +Library BuiltIn +Resource ../commonlib.robot + +*** Variables *** + + +*** Test Cases *** +Run list pipeline + ${output} = Execute ozone scmcli pipeline list + Should contain ${output} Type:RATIS, Factor:ONE, State:OPEN \ No newline at end of file diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/cluster.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/cluster.py index 143426614df1..1616083377b4 100644 --- a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/cluster.py +++ b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/cluster.py @@ -151,8 +151,8 @@ def start(self): # check if docker is up. 
- if "HADOOP_RUNNER_VERSION" not in os.environ: - self.__logger__.error("HADOOP_RUNNER_VERSION is not set.") + if "OZONE_RUNNER_VERSION" not in os.environ: + self.__logger__.error("OZONE_RUNNER_VERSION is not set.") sys.exit(1) if "HDDS_VERSION" not in os.environ: diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java index 68035f8d4233..ca1f17934582 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java @@ -53,6 +53,7 @@ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.minikdc.MiniKdc; import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.net.ServerSocketUtil; import org.apache.hadoop.ozone.client.CertificateClientTestImpl; import org.apache.hadoop.ozone.common.Storage; import org.apache.hadoop.ozone.om.OMConfigKeys; @@ -153,6 +154,18 @@ public void init() { try { conf = new OzoneConfiguration(); conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "localhost"); + + conf.setInt(ScmConfigKeys.OZONE_SCM_CLIENT_PORT_KEY, ServerSocketUtil + .getPort(ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT, 100)); + conf.setInt(ScmConfigKeys.OZONE_SCM_DATANODE_PORT_KEY, ServerSocketUtil + .getPort(ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT, 100)); + conf.setInt(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_KEY, + ServerSocketUtil.getPort(ScmConfigKeys + .OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT, 100)); + conf.setInt(ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_KEY, + ServerSocketUtil.getPort(ScmConfigKeys + .OZONE_SCM_SECURITY_SERVICE_PORT_DEFAULT, 100)); + DefaultMetricsSystem.setMiniClusterMode(true); final String path = folder.newFolder().toString(); metaDirPath = Paths.get(path, "om-meta"); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java index 313a3c094fe5..395bda0d5ddc 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java @@ -20,6 +20,7 @@ import java.io.IOException; import java.net.ServerSocket; +import java.nio.ByteBuffer; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.util.ArrayList; @@ -193,10 +194,10 @@ public static ChunkInfo getChunk(long keyID, int seqNo, long offset, * @param len - Number of bytes. * @return byte array with valid data. 
*/ - public static byte[] getData(int len) { + public static ByteBuffer getData(int len) { byte[] data = new byte[len]; r.nextBytes(data); - return data; + return ByteBuffer.wrap(data); } /** @@ -206,7 +207,7 @@ public static byte[] getData(int len) { * @param data - data array * @throws NoSuchAlgorithmException */ - public static void setDataChecksum(ChunkInfo info, byte[] data) + public static void setDataChecksum(ChunkInfo info, ByteBuffer data) throws OzoneChecksumException { Checksum checksum = new Checksum(); info.setChecksumData(checksum.computeChecksum(data)); @@ -232,7 +233,7 @@ public static ContainerCommandRequestProto getWriteChunkRequest( writeRequest.setBlockID(blockID.getDatanodeBlockIDProtobuf()); - byte[] data = getData(datalen); + ByteBuffer data = getData(datalen); ChunkInfo info = getChunk(blockID.getLocalID(), 0, 0, datalen); setDataChecksum(info, data); @@ -262,7 +263,7 @@ public static ContainerCommandRequestProto getWriteSmallFileRequest( throws Exception { ContainerProtos.PutSmallFileRequestProto.Builder smallFileRequest = ContainerProtos.PutSmallFileRequestProto.newBuilder(); - byte[] data = getData(dataLen); + ByteBuffer data = getData(dataLen); ChunkInfo info = getChunk(blockID.getLocalID(), 0, 0, dataLen); setDataChecksum(info, data); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java index 36316c313913..e1d1a95f4e21 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java @@ -201,7 +201,7 @@ public void testBlockDeletion() throws Exception { BlockDeletingServiceTestImpl svc = getBlockDeletinService(containerSet, conf, 1000); svc.start(); - GenericTestUtils.waitFor(() -> svc.isStarted(), 100, 3000); + GenericTestUtils.waitFor(svc::isStarted, 100, 3000); // Ensure 1 container was created List containerData = Lists.newArrayList(); @@ -210,7 +210,7 @@ public void testBlockDeletion() throws Exception { try(ReferenceCountedDB meta = BlockUtils.getDB( (KeyValueContainerData) containerData.get(0), conf)) { - Map containerMap = containerSet.getContainerMapCopy(); + Map> containerMap = containerSet.getContainerMapCopy(); // NOTE: this test assumes that all the container is KetValueContainer and // have DeleteTransactionId in KetValueContainerData. If other // types is going to be added, this test should be checked. @@ -261,7 +261,7 @@ public void testShutdownService() throws Exception { BlockDeletingServiceTestImpl service = getBlockDeletinService(containerSet, conf, 1000); service.start(); - GenericTestUtils.waitFor(() -> service.isStarted(), 100, 3000); + GenericTestUtils.waitFor(service::isStarted, 100, 3000); // Run some deleting tasks and verify there are threads running service.runDeletingTasks(); @@ -340,7 +340,7 @@ public void testBlockDeletionTimeout() throws Exception { // The block deleting successfully and shouldn't catch timed // out warning log. 
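ContainerTestHelper.getData now returns a ByteBuffer wrapping the random bytes, and the tests below rewind that buffer whenever it is consumed twice (for example by a digest before writeChunk), because relative reads advance the buffer's position. A self-contained java.nio illustration of that behaviour:

    import java.nio.ByteBuffer;
    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;

    public class RewindSketch {
      public static void main(String[] args) throws NoSuchAlgorithmException {
        ByteBuffer data = ByteBuffer.wrap(new byte[] {1, 2, 3, 4});

        MessageDigest sha = MessageDigest.getInstance("SHA-256");
        sha.update(data);                      // consumes the buffer: position == limit
        System.out.println(data.remaining());  // 0 - nothing left for a subsequent write

        data.rewind();                         // reset position to 0, contents unchanged
        System.out.println(data.remaining());  // 4 - ready to be written as a chunk
      }
    }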
- Assert.assertTrue(!newLog.getOutput().contains( + Assert.assertFalse(newLog.getOutput().contains( "Background task executes timed out, retrying in next interval")); } svc.shutdown(); @@ -351,9 +351,7 @@ private BlockDeletingServiceTestImpl getBlockDeletinService( OzoneContainer ozoneContainer = Mockito.mock(OzoneContainer.class); Mockito.when(ozoneContainer.getContainerSet()).thenReturn(containerSet); Mockito.when(ozoneContainer.getWriteChannel()).thenReturn(null); - BlockDeletingServiceTestImpl service = - new BlockDeletingServiceTestImpl(ozoneContainer, timeout, conf); - return service; + return new BlockDeletingServiceTestImpl(ozoneContainer, timeout, conf); } @Test(timeout = 30000) @@ -382,7 +380,7 @@ public void testContainerThrottle() throws Exception { service.start(); try { - GenericTestUtils.waitFor(() -> service.isStarted(), 100, 3000); + GenericTestUtils.waitFor(service::isStarted, 100, 3000); // 1st interval processes 1 container 1 block and 10 chunks deleteAndWait(service, 1); Assert.assertEquals(10, getNumberOfChunksInContainers(containerSet)); @@ -395,7 +393,7 @@ public void testContainerThrottle() throws Exception { if (getNumberOfChunksInContainers(containerSet) == 0) { return true; } - } catch (Exception e) {} + } catch (Exception ignored) {} return false; }, 100, 100000); Assert.assertEquals(0, getNumberOfChunksInContainers(containerSet)); @@ -436,7 +434,7 @@ public void testBlockThrottle() throws Exception { service.start(); try { - GenericTestUtils.waitFor(() -> service.isStarted(), 100, 3000); + GenericTestUtils.waitFor(service::isStarted, 100, 3000); // Total blocks = 3 * 5 = 15 // block per task = 2 // number of containers = 5 @@ -453,10 +451,10 @@ public void testBlockThrottle() throws Exception { } private int getNumberOfChunksInContainers(ContainerSet containerSet) { - Iterator containerIterator = containerSet.getContainerIterator(); + Iterator> iterator = containerSet.getContainerIterator(); int numChunks = 0; - while (containerIterator.hasNext()) { - Container container = containerIterator.next(); + while (iterator.hasNext()) { + Container container = iterator.next(); File chunkDir = FileUtils.getFile( ((KeyValueContainerData) container.getContainerData()) .getChunksPath()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java index c0415e81ff2e..ed482093dfbf 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java @@ -70,7 +70,6 @@ import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.util.ArrayList; -import java.util.Arrays; import java.util.HashMap; import java.util.LinkedList; import java.util.List; @@ -349,11 +348,11 @@ private ChunkInfo writeChunkHelper(BlockID blockID) throws IOException { } ChunkInfo info = getChunk( blockID.getLocalID(), 0, 0, datalen); - byte[] data = getData(datalen); + ByteBuffer data = getData(datalen); setDataChecksum(info, data); commitBytesBefore = container.getContainerData() .getVolume().getCommittedBytes(); - chunkManager.writeChunk(container, blockID, info, ByteBuffer.wrap(data), + chunkManager.writeChunk(container, blockID, info, data, getDispatcherContext()); commitBytesAfter = 
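Most of the TestBlockDeletingService edits above are mechanical clean-ups: assertTrue(!x) becomes assertFalse(x), throwaway locals are inlined, and the lambda () -> svc.isStarted() becomes the method reference svc::isStarted, which still satisfies the Supplier<Boolean> that GenericTestUtils.waitFor takes. A tiny sketch of that equivalence with a hypothetical service class:

    import java.util.function.Supplier;

    public class MethodRefSketch {
      // Hypothetical stand-in for the test's BlockDeletingServiceTestImpl.
      static class Service {
        boolean isStarted() {
          return true;
        }
      }

      public static void main(String[] args) {
        Service svc = new Service();

        Supplier<Boolean> lambda = () -> svc.isStarted();  // explicit lambda
        Supplier<Boolean> methodRef = svc::isStarted;      // equivalent method reference

        System.out.println(lambda.get() && methodRef.get());
      }
    }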
container.getContainerData() .getVolume().getCommittedBytes(); @@ -397,9 +396,9 @@ public void testWritReadManyChunks() throws IOException { Map fileHashMap = new HashMap<>(); for (int x = 0; x < chunkCount; x++) { ChunkInfo info = getChunk(blockID.getLocalID(), x, 0, datalen); - byte[] data = getData(datalen); + ByteBuffer data = getData(datalen); setDataChecksum(info, data); - chunkManager.writeChunk(container, blockID, info, ByteBuffer.wrap(data), + chunkManager.writeChunk(container, blockID, info, data, getDispatcherContext()); String fileName = String.format("%s.data.%d", blockID.getLocalID(), x); fileHashMap.put(fileName, info); @@ -431,7 +430,7 @@ public void testWritReadManyChunks() throws IOException { for (int x = 0; x < chunkCount; x++) { String fileName = String.format("%s.data.%d", blockID.getLocalID(), x); ChunkInfo info = fileHashMap.get(fileName); - byte[] data = chunkManager + ByteBuffer data = chunkManager .readChunk(container, blockID, info, getDispatcherContext()); ChecksumData checksumData = checksum.computeChecksum(data); Assert.assertEquals(info.getChecksumData(), checksumData); @@ -456,21 +455,22 @@ public void testPartialRead() throws Exception { BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID); ChunkInfo info = getChunk( blockID.getLocalID(), 0, 0, datalen); - byte[] data = getData(datalen); + ByteBuffer data = getData(datalen); setDataChecksum(info, data); - chunkManager.writeChunk(container, blockID, info, ByteBuffer.wrap(data), + chunkManager.writeChunk(container, blockID, info, data, getDispatcherContext()); - byte[] readData = chunkManager + ByteBuffer readData = chunkManager .readChunk(container, blockID, info, getDispatcherContext()); - assertTrue(Arrays.equals(data, readData)); + assertTrue(data.rewind().equals(readData.rewind())); ChunkInfo info2 = getChunk(blockID.getLocalID(), 0, start, length); - byte[] readData2 = chunkManager + ByteBuffer readData2 = chunkManager .readChunk(container, blockID, info2, getDispatcherContext()); - assertEquals(length, readData2.length); - assertTrue(Arrays.equals( - Arrays.copyOfRange(data, start, start + length), readData2)); + assertEquals(length, info2.getLen()); + boolean equals = + data.position(start).limit(start+length).equals(readData2.rewind()); + assertTrue(equals); } /** @@ -491,15 +491,17 @@ public void testOverWrite() throws IOException, BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID); ChunkInfo info = getChunk( blockID.getLocalID(), 0, 0, datalen); - byte[] data = getData(datalen); + ByteBuffer data = getData(datalen); setDataChecksum(info, data); - chunkManager.writeChunk(container, blockID, info, ByteBuffer.wrap(data), + chunkManager.writeChunk(container, blockID, info, data, getDispatcherContext()); - chunkManager.writeChunk(container, blockID, info, ByteBuffer.wrap(data), + data.rewind(); + chunkManager.writeChunk(container, blockID, info, data, getDispatcherContext()); + data.rewind(); // With the overwrite flag it should work now. 
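The reworked partial-read assertion above compares a window of the original buffer (set via position and limit) against the returned chunk instead of copying with Arrays.copyOfRange: ByteBuffer.equals only examines the bytes remaining between position and limit of each buffer. A minimal java.nio illustration of that comparison:

    import java.nio.ByteBuffer;

    public class SliceCompareSketch {
      public static void main(String[] args) {
        ByteBuffer data = ByteBuffer.wrap(new byte[] {10, 20, 30, 40, 50});

        // Pretend a partial read of length 3 starting at offset 1 came back in its own buffer.
        ByteBuffer readData2 = ByteBuffer.wrap(new byte[] {20, 30, 40});

        int start = 1;
        int length = 3;

        // equals() compares only the remaining bytes (position..limit) of each buffer,
        // so windowing the source buffer avoids an Arrays.copyOfRange copy.
        boolean equal = data.position(start).limit(start + length)
            .equals(readData2.rewind());
        System.out.println(equal);  // true
      }
    }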
info.addMetadata(OzoneConsts.CHUNK_OVERWRITE, "true"); - chunkManager.writeChunk(container, blockID, info, ByteBuffer.wrap(data), + chunkManager.writeChunk(container, blockID, info, data, getDispatcherContext()); long bytesUsed = container.getContainerData().getBytesUsed(); Assert.assertEquals(datalen, bytesUsed); @@ -531,17 +533,18 @@ public void testMultipleWriteSingleRead() throws IOException, long offset = x * datalen; ChunkInfo info = getChunk( blockID.getLocalID(), 0, offset, datalen); - byte[] data = getData(datalen); + ByteBuffer data = getData(datalen); oldSha.update(data); + data.rewind(); setDataChecksum(info, data); - chunkManager.writeChunk(container, blockID, info, ByteBuffer.wrap(data), + chunkManager.writeChunk(container, blockID, info, data, getDispatcherContext()); } // Request to read the whole data in a single go. ChunkInfo largeChunk = getChunk(blockID.getLocalID(), 0, 0, datalen * chunkCount); - byte[] newdata = + ByteBuffer newdata = chunkManager.readChunk(container, blockID, largeChunk, getDispatcherContext()); MessageDigest newSha = MessageDigest.getInstance(OzoneConsts.FILE_HASH); @@ -566,9 +569,9 @@ public void testDeleteChunk() throws IOException, BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID); ChunkInfo info = getChunk( blockID.getLocalID(), 0, 0, datalen); - byte[] data = getData(datalen); + ByteBuffer data = getData(datalen); setDataChecksum(info, data); - chunkManager.writeChunk(container, blockID, info, ByteBuffer.wrap(data), + chunkManager.writeChunk(container, blockID, info, data, getDispatcherContext()); chunkManager.deleteChunk(container, blockID, info); exception.expect(StorageContainerException.class); @@ -681,9 +684,9 @@ public void testPutBlockWithLotsOfChunks() throws IOException, for (int x = 1; x < chunkCount; x++) { // with holes in the front (before x * datalen) info = getChunk(blockID.getLocalID(), x, x * datalen, datalen); - byte[] data = getData(datalen); + ByteBuffer data = getData(datalen); setDataChecksum(info, data); - chunkManager.writeChunk(container, blockID, info, ByteBuffer.wrap(data), + chunkManager.writeChunk(container, blockID, info, data, getDispatcherContext()); totalSize += datalen; chunkList.add(info); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java index 863a2b3359fb..7fb9825f801a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java @@ -46,6 +46,7 @@ import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.container.ozoneimpl.ContainerMetadataScanner; +import org.apache.hadoop.ozone.container.ozoneimpl.ContainerScrubberConfiguration; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; @@ -159,8 +160,10 @@ public void testOpenContainerIntegrity() throws Exception { deleteDirectory(chunksDir); Assert.assertFalse(chunksDir.exists()); - ContainerMetadataScanner sb = new ContainerMetadataScanner(ozoneConfig, - oc.getController(), 0); + ContainerScrubberConfiguration conf = ozoneConfig.getObject( + ContainerScrubberConfiguration.class); + 
ContainerMetadataScanner sb = new ContainerMetadataScanner(conf, + oc.getController()); sb.scrub(c); // wait for the incremental container report to propagate to SCM diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java index 75dd880d2306..3cba9b3effae 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java @@ -154,8 +154,6 @@ public void write(int b) throws IOException { Assert.assertTrue( omMetrics.getLastCheckpointCreationTimeTaken() == 0); - Assert.assertTrue( - omMetrics.getLastCheckpointTarOperationTimeTaken() == 0); Assert.assertTrue( omMetrics.getLastCheckpointStreamingTimeTaken() == 0); @@ -164,8 +162,6 @@ public void write(int b) throws IOException { Assert.assertTrue(tempFile.length() > 0); Assert.assertTrue( omMetrics.getLastCheckpointCreationTimeTaken() > 0); - Assert.assertTrue( - omMetrics.getLastCheckpointTarOperationTimeTaken() > 0); Assert.assertTrue( omMetrics.getLastCheckpointStreamingTimeTaken() > 0); } finally { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java new file mode 100644 index 000000000000..bada595f9963 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java @@ -0,0 +1,137 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0
+ *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.scm.node; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.XceiverClientManager; +import org.apache.hadoop.hdds.scm.client.ContainerOperationClient; +import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static java.util.concurrent.TimeUnit.SECONDS; +import static junit.framework.TestCase.assertEquals; +import static org.apache.hadoop.hdds.HddsConfigKeys.*; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.*; + +/** + * Test from the scmclient for decommission and maintenance. + */ + +public class TestDecommissionAndMaintenance { + private static final Logger LOG = + LoggerFactory.getLogger(TestDecommissionAndMaintenance.class); + + private static int numOfDatanodes = 5; + private MiniOzoneCluster cluster; + + private ContainerOperationClient scmClient; + + @Before + public void setUp() throws Exception { + OzoneConfiguration conf = new OzoneConfiguration(); + final int interval = 100; + + conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, + interval, TimeUnit.MILLISECONDS); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS); + conf.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 1, SECONDS); + conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 1, SECONDS); + conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, SECONDS); + conf.setTimeDuration(HDDS_NODE_REPORT_INTERVAL, 1, SECONDS); + conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS); + conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS); + + cluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(numOfDatanodes) + .build(); + cluster.waitForClusterToBeReady(); + scmClient = new ContainerOperationClient(cluster + .getStorageContainerLocationClient(), + new XceiverClientManager(conf)); + } + + @After + public void tearDown() throws Exception { + if (cluster != null) { + cluster.shutdown(); + } + } + + @Test + public void testNodeCanBeDecommMaintAndRecommissioned() + throws IOException { + NodeManager nm = cluster.getStorageContainerManager().getScmNodeManager(); + + List dns = nm.getAllNodes(); + scmClient.decommissionNodes(Arrays.asList(getDNHostAndPort(dns.get(0)))); + + // Ensure one node is decommissioning + List decomNodes = nm.getNodes( + HddsProtos.NodeOperationalState.DECOMMISSIONING, + HddsProtos.NodeState.HEALTHY); + assertEquals(1, decomNodes.size()); + + scmClient.recommissionNodes(Arrays.asList(getDNHostAndPort(dns.get(0)))); + + // Ensure zero nodes are now decommissioning + decomNodes = nm.getNodes( + HddsProtos.NodeOperationalState.DECOMMISSIONING, + HddsProtos.NodeState.HEALTHY); + assertEquals(0, decomNodes.size()); + + scmClient.startMaintenanceNodes(Arrays.asList( + getDNHostAndPort(dns.get(0))), 10); + + // None are decommissioning + 
decomNodes = nm.getNodes( + HddsProtos.NodeOperationalState.DECOMMISSIONING, + HddsProtos.NodeState.HEALTHY); + assertEquals(0, decomNodes.size()); + + // One is in Maintenance + decomNodes = nm.getNodes( + HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE, + HddsProtos.NodeState.HEALTHY); + assertEquals(1, decomNodes.size()); + + scmClient.recommissionNodes(Arrays.asList(getDNHostAndPort(dns.get(0)))); + + // None are in maintenance + decomNodes = nm.getNodes( + HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE, + HddsProtos.NodeState.HEALTHY); + assertEquals(0, decomNodes.size()); + } + + private String getDNHostAndPort(DatanodeDetails dn) { + return dn.getHostName()+":"+dn.getPorts().get(0).getValue(); + } + +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java index 8a32dd63550f..d64eae4e6e4c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java @@ -219,7 +219,8 @@ public OmBucketInfo getBucketInfo(String volumeName, String bucketName) throws IOException { Preconditions.checkNotNull(volumeName); Preconditions.checkNotNull(bucketName); - metadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName, bucketName); + metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName, + bucketName); try { String bucketKey = metadataManager.getBucketKey(volumeName, bucketName); OmBucketInfo value = metadataManager.getBucketTable().get(bucketKey); @@ -237,7 +238,7 @@ public OmBucketInfo getBucketInfo(String volumeName, String bucketName) } throw ex; } finally { - metadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName, + metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName, bucketName); } } @@ -520,7 +521,7 @@ public List getAcl(OzoneObj obj) throws IOException { } String volume = obj.getVolumeName(); String bucket = obj.getBucketName(); - metadataManager.getLock().acquireLock(BUCKET_LOCK, volume, bucket); + metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volume, bucket); try { String dbBucketKey = metadataManager.getBucketKey(volume, bucket); OmBucketInfo bucketInfo = @@ -538,7 +539,7 @@ public List getAcl(OzoneObj obj) throws IOException { } throw ex; } finally { - metadataManager.getLock().releaseLock(BUCKET_LOCK, volume, bucket); + metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volume, bucket); } } @@ -557,7 +558,7 @@ public boolean checkAccess(OzoneObj ozObject, RequestContext context) String volume = ozObject.getVolumeName(); String bucket = ozObject.getBucketName(); - metadataManager.getLock().acquireLock(BUCKET_LOCK, volume, bucket); + metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volume, bucket); try { String dbBucketKey = metadataManager.getBucketKey(volume, bucket); OmBucketInfo bucketInfo = @@ -569,8 +570,10 @@ public boolean checkAccess(OzoneObj ozObject, RequestContext context) } boolean hasAccess = OzoneAclUtil.checkAclRights(bucketInfo.getAcls(), context); - LOG.debug("user:{} has access rights for bucket:{} :{} ", - context.getClientUgi(), ozObject.getBucketName(), hasAccess); + if (LOG.isDebugEnabled()) { + LOG.debug("user:{} has access rights for bucket:{} :{} ", + context.getClientUgi(), ozObject.getBucketName(), hasAccess); + } return hasAccess; } catch (IOException ex) { if(ex instanceof OMException) { @@ -581,7 +584,7 @@ public boolean 
checkAccess(OzoneObj ozObject, RequestContext context) throw new OMException("Check access operation failed for " + "bucket:" + bucket, ex, INTERNAL_ERROR); } finally { - metadataManager.getLock().releaseLock(BUCKET_LOCK, volume, bucket); + metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volume, bucket); } } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index bae71bf51131..20b7fdfec534 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -193,7 +193,6 @@ private KeyManagerImpl(OzoneManager om, ScmClient scmClient, this.secretManager = secretManager; this.kmsProvider = kmsProvider; - start(conf); } @Override @@ -623,7 +622,8 @@ public OmKeyInfo lookupKey(OmKeyArgs args, String clientAddress) String volumeName = args.getVolumeName(); String bucketName = args.getBucketName(); String keyName = args.getKeyName(); - metadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName, bucketName); + metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName, + bucketName); try { String keyBytes = metadataManager.getOzoneKey( volumeName, bucketName, keyName); @@ -683,7 +683,7 @@ public OmKeyInfo lookupKey(OmKeyArgs args, String clientAddress) throw new OMException(ex.getMessage(), KEY_NOT_FOUND); } finally { - metadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName, + metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName, bucketName); } } @@ -1312,7 +1312,8 @@ public OmMultipartUploadList listMultipartUploads(String volumeName, Preconditions.checkNotNull(volumeName); Preconditions.checkNotNull(bucketName); - metadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName, bucketName); + metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName, + bucketName); try { List multipartUploadKeys = @@ -1355,7 +1356,7 @@ public OmMultipartUploadList listMultipartUploads(String volumeName, throw new OMException(ex.getMessage(), ResultCodes .LIST_MULTIPART_UPLOAD_PARTS_FAILED); } finally { - metadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName, + metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName, bucketName); } } @@ -1371,7 +1372,8 @@ public OmMultipartUploadListParts listParts(String volumeName, boolean isTruncated = false; int nextPartNumberMarker = 0; - metadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName, bucketName); + metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName, + bucketName); try { String multipartKey = metadataManager.getMultipartKey(volumeName, bucketName, keyName, uploadID); @@ -1458,7 +1460,7 @@ public OmMultipartUploadListParts listParts(String volumeName, throw new OMException(ex.getMessage(), ResultCodes .LIST_MULTIPART_UPLOAD_PARTS_FAILED); } finally { - metadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName, + metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName, bucketName); } } @@ -1604,7 +1606,7 @@ public List getAcl(OzoneObj obj) throws IOException { String bucket = obj.getBucketName(); String keyName = obj.getKeyName(); - metadataManager.getLock().acquireLock(BUCKET_LOCK, volume, bucket); + metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volume, bucket); try { validateBucket(volume, bucket); String objectKey = metadataManager.getOzoneKey(volume, bucket, keyName); @@ -1621,7 +1623,7 @@ 
public List getAcl(OzoneObj obj) throws IOException { } throw ex; } finally { - metadataManager.getLock().releaseLock(BUCKET_LOCK, volume, bucket); + metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volume, bucket); } } @@ -1649,7 +1651,7 @@ public boolean checkAccess(OzoneObj ozObject, RequestContext context) .setKeyName(keyName) .build(); - metadataManager.getLock().acquireLock(BUCKET_LOCK, volume, bucket); + metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volume, bucket); try { validateBucket(volume, bucket); OmKeyInfo keyInfo = null; @@ -1659,8 +1661,10 @@ public boolean checkAccess(OzoneObj ozObject, RequestContext context) if (keyInfo == null) { // the key does not exist, but it is a parent "dir" of some key // let access be determined based on volume/bucket/prefix ACL - LOG.debug("key:{} is non-existent parent, permit access to user:{}", - keyName, context.getClientUgi()); + if (LOG.isDebugEnabled()) { + LOG.debug("key:{} is non-existent parent, permit access to user:{}", + keyName, context.getClientUgi()); + } return true; } } catch (OMException e) { @@ -1676,8 +1680,10 @@ public boolean checkAccess(OzoneObj ozObject, RequestContext context) boolean hasAccess = OzoneAclUtil.checkAclRight( keyInfo.getAcls(), context); - LOG.debug("user:{} has access rights for key:{} :{} ", - context.getClientUgi(), ozObject.getKeyName(), hasAccess); + if (LOG.isDebugEnabled()) { + LOG.debug("user:{} has access rights for key:{} :{} ", + context.getClientUgi(), ozObject.getKeyName(), hasAccess); + } return hasAccess; } catch (IOException ex) { if(ex instanceof OMException) { @@ -1688,7 +1694,7 @@ public boolean checkAccess(OzoneObj ozObject, RequestContext context) throw new OMException("Check access operation failed for " + "key:" + keyName, ex, INTERNAL_ERROR); } finally { - metadataManager.getLock().releaseLock(BUCKET_LOCK, volume, bucket); + metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volume, bucket); } } @@ -1733,7 +1739,8 @@ public OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException { String bucketName = args.getBucketName(); String keyName = args.getKeyName(); - metadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName, bucketName); + metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName, + bucketName); try { // Check if this is the root of the filesystem. 
if (keyName.length() == 0) { @@ -1763,15 +1770,16 @@ public OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException { if (keys.iterator().hasNext()) { return new OzoneFileStatus(keyName); } - - LOG.debug("Unable to get file status for the key: volume:" + volumeName + - " bucket:" + bucketName + " key:" + keyName + " with error no " + - "such file exists:"); + if (LOG.isDebugEnabled()) { + LOG.debug("Unable to get file status for the key: volume: {}, bucket:" + + " {}, key: {}, with error: No such file exists.", volumeName, + bucketName, keyName); + } throw new OMException("Unable to get file status: volume: " + volumeName + " bucket: " + bucketName + " key: " + keyName, FILE_NOT_FOUND); } finally { - metadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName, + metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName, bucketName); } } @@ -1916,7 +1924,8 @@ public OmKeyInfo lookupFile(OmKeyArgs args, String clientAddress) String bucketName = args.getBucketName(); String keyName = args.getKeyName(); - metadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName, bucketName); + metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName, + bucketName); try { OzoneFileStatus fileStatus = getFileStatus(args); if (fileStatus.isFile()) { @@ -1927,7 +1936,7 @@ public OmKeyInfo lookupFile(OmKeyArgs args, String clientAddress) } //if key is not of type file or if key is not found we throw an exception } finally { - metadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName, + metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName, bucketName); } @@ -1954,7 +1963,8 @@ public List listStatus(OmKeyArgs args, boolean recursive, String keyName = args.getKeyName(); List fileStatusList = new ArrayList<>(); - metadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName, bucketName); + metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName, + bucketName); try { if (Strings.isNullOrEmpty(startKey)) { OzoneFileStatus fileStatus = getFileStatus(args); @@ -2016,7 +2026,7 @@ public List listStatus(OmKeyArgs args, boolean recursive, } } } finally { - metadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName, + metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName, bucketName); } return fileStatusList; @@ -2127,8 +2137,10 @@ private void sortDatanodeInPipeline(OmKeyInfo keyInfo, String clientMachine) { List sortedNodes = scmClient.getBlockClient() .sortDatanodes(nodeList, clientMachine); k.getPipeline().setNodesInOrder(sortedNodes); - LOG.debug("Sort datanodes {} for client {}, return {}", nodes, - clientMachine, sortedNodes); + if (LOG.isDebugEnabled()) { + LOG.debug("Sort datanodes {} for client {}, return {}", nodes, + clientMachine, sortedNodes); + } } catch (IOException e) { LOG.warn("Unable to sort datanodes based on distance to " + "client, volume=" + keyInfo.getVolumeName() + diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java index dbbb065a3b85..81031838b239 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java @@ -23,9 +23,8 @@ import static org.apache.hadoop.ozone.OzoneConsts. 
OZONE_DB_CHECKPOINT_REQUEST_FLUSH; -import java.io.File; -import java.io.FileInputStream; import java.io.IOException; +import java.nio.file.Path; import java.time.Duration; import java.time.Instant; @@ -34,12 +33,9 @@ import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; -import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdfs.server.namenode.TransferFsImage; import org.apache.hadoop.hdfs.util.DataTransferThrottler; -import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.hdds.utils.db.DBStore; @@ -102,8 +98,7 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) { return; } - FileInputStream checkpointFileInputStream = null; - File checkPointTarFile = null; + DBCheckpoint checkpoint = null; try { boolean flush = false; @@ -131,8 +126,8 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) { ratisSnapshotIndex = om.getRatisSnapshotIndex(); } - DBCheckpoint checkpoint = omDbStore.getCheckpoint(flush); - if (checkpoint == null) { + checkpoint = omDbStore.getCheckpoint(flush); + if (checkpoint == null || checkpoint.getCheckpointLocation() == null) { LOG.error("Unable to process metadata snapshot request. " + "Checkpoint request returned null."); response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); @@ -141,49 +136,41 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) { omMetrics.setLastCheckpointCreationTimeTaken( checkpoint.checkpointCreationTimeTaken()); - Instant start = Instant.now(); - checkPointTarFile = OmUtils.createTarFile( - checkpoint.getCheckpointLocation()); - Instant end = Instant.now(); - - long duration = Duration.between(start, end).toMillis(); - LOG.debug("Time taken to archive the checkpoint : " + duration + - " milliseconds"); - LOG.info("Checkpoint Tar location = " + - checkPointTarFile.getAbsolutePath()); - omMetrics.setLastCheckpointTarOperationTimeTaken(duration); - + Path file = checkpoint.getCheckpointLocation().getFileName(); + if (file == null) { + return; + } response.setContentType("application/x-tgz"); response.setHeader("Content-Disposition", "attachment; filename=\"" + - checkPointTarFile.getName() + "\""); + file.toString() + ".tgz\""); // Ratis snapshot index used when downloading DB checkpoint to OM follower response.setHeader(OM_RATIS_SNAPSHOT_INDEX, String.valueOf(ratisSnapshotIndex)); - checkpointFileInputStream = new FileInputStream(checkPointTarFile); - start = Instant.now(); - TransferFsImage.copyFileToStream(response.getOutputStream(), - checkPointTarFile, - checkpointFileInputStream, - throttler); - end = Instant.now(); + Instant start = Instant.now(); + OmUtils.writeOmDBCheckpointToStream(checkpoint, + response.getOutputStream()); + Instant end = Instant.now(); - duration = Duration.between(start, end).toMillis(); - LOG.debug("Time taken to write the checkpoint to response output " + + long duration = Duration.between(start, end).toMillis(); + LOG.info("Time taken to write the checkpoint to response output " + "stream: " + duration + " milliseconds"); omMetrics.setLastCheckpointStreamingTimeTaken(duration); - checkpoint.cleanupCheckpoint(); - } catch (IOException e) { + } catch (Exception e) { LOG.error( "Unable to process metadata snapshot request. 
", e); response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); } finally { - if (checkPointTarFile != null) { - FileUtils.deleteQuietly(checkPointTarFile); + if (checkpoint != null) { + try { + checkpoint.cleanupCheckpoint(); + } catch (IOException e) { + LOG.error("Error trying to clean checkpoint at {} .", + checkpoint.getCheckpointLocation().toString()); + } } - IOUtils.closeStream(checkpointFileInputStream); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java index de42be01705c..2d1ae30648db 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java @@ -123,7 +123,6 @@ public class OMMetrics { // Metrics to track checkpointing statistics from last run. private @Metric MutableGaugeLong lastCheckpointCreationTimeTaken; - private @Metric MutableGaugeLong lastCheckpointTarOperationTimeTaken; private @Metric MutableGaugeLong lastCheckpointStreamingTimeTaken; private @Metric MutableCounterLong numBucketS3Creates; @@ -511,10 +510,6 @@ public void setLastCheckpointCreationTimeTaken(long val) { this.lastCheckpointCreationTimeTaken.set(val); } - public void setLastCheckpointTarOperationTimeTaken(long val) { - this.lastCheckpointTarOperationTimeTaken.set(val); - } - public void setLastCheckpointStreamingTimeTaken(long val) { this.lastCheckpointStreamingTimeTaken.set(val); } @@ -756,11 +751,6 @@ public long getLastCheckpointCreationTimeTaken() { return lastCheckpointCreationTimeTaken.value(); } - @VisibleForTesting - public long getLastCheckpointTarOperationTimeTaken() { - return lastCheckpointTarOperationTimeTaken.value(); - } - @VisibleForTesting public long getLastCheckpointStreamingTimeTaken() { return lastCheckpointStreamingTimeTaken.value(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index 6c085911e11b..95f21ae0ca33 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -23,6 +23,9 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.TreeMap; +import java.util.TreeSet; import java.util.stream.Collectors; import org.apache.hadoop.hdds.client.BlockID; @@ -619,23 +622,31 @@ public List listBuckets(final String volumeName, } int currentCount = 0; - try (TableIterator> - bucketIter = bucketTable.iterator()) { - KeyValue kv = bucketIter.seek(startKey); - while (currentCount < maxNumOfBuckets && bucketIter.hasNext()) { - kv = bucketIter.next(); - // Skip the Start Bucket if needed. - if (kv != null && skipStartKey && - kv.getKey().equals(startKey)) { + + // For Bucket it is full cache, so we can just iterate in-memory table + // cache. + Iterator, CacheValue>> iterator = + bucketTable.cacheIterator(); + + + while (currentCount < maxNumOfBuckets && iterator.hasNext()) { + Map.Entry, CacheValue> entry = + iterator.next(); + + String key = entry.getKey().getCacheKey(); + OmBucketInfo omBucketInfo = entry.getValue().getCacheValue(); + // Making sure that entry in cache is not for delete bucket request. 
+ + if (omBucketInfo != null) { + if (key.equals(startKey) && skipStartKey) { continue; } - if (kv != null && kv.getKey().startsWith(seekPrefix)) { - result.add(kv.getValue()); + + // We should return only the keys, whose keys match with prefix and + // the keys after the startBucket. + if (key.startsWith(seekPrefix) && key.compareTo(startKey) > 0) { + result.add(omBucketInfo); currentCount++; - } else { - // The SeekPrefix does not match any more, we can break out of the - // loop. - break; } } } @@ -645,7 +656,12 @@ public List listBuckets(final String volumeName, @Override public List listKeys(String volumeName, String bucketName, String startKey, String keyPrefix, int maxKeys) throws IOException { + List result = new ArrayList<>(); + if (maxKeys <= 0) { + return result; + } + if (Strings.isNullOrEmpty(volumeName)) { throw new OMException("Volume name is required.", ResultCodes.VOLUME_NOT_FOUND); @@ -680,19 +696,56 @@ public List listKeys(String volumeName, String bucketName, seekPrefix = getBucketKey(volumeName, bucketName + OM_KEY_PREFIX); } int currentCount = 0; - try (TableIterator> keyIter = - getKeyTable() - .iterator()) { - KeyValue kv = keyIter.seek(seekKey); - while (currentCount < maxKeys && keyIter.hasNext()) { - kv = keyIter.next(); - // Skip the Start key if needed. - if (kv != null && skipStartKey && kv.getKey().equals(seekKey)) { - continue; + + + TreeMap cacheKeyMap = new TreeMap<>(); + Set deletedKeySet = new TreeSet<>(); + Iterator, CacheValue>> iterator = + keyTable.cacheIterator(); + + //TODO: We can avoid this iteration if table cache has stored entries in + // treemap. Currently HashMap is used in Cache. HashMap get operation is an + // constant time operation, where as for treeMap get is log(n). + // So if we move to treemap, the get operation will be affected. As get + // is frequent operation on table. So, for now in list we iterate cache map + // and construct treeMap which match with keyPrefix and are greater than or + // equal to startKey. Later we can revisit this, if list operation + // is becoming slow. + while (iterator.hasNext()) { + Map.Entry< CacheKey, CacheValue> entry = + iterator.next(); + + String key = entry.getKey().getCacheKey(); + OmKeyInfo omKeyInfo = entry.getValue().getCacheValue(); + // Making sure that entry in cache is not for delete key request. + + if (omKeyInfo != null) { + if (key.startsWith(seekPrefix) && key.compareTo(seekKey) >= 0) { + cacheKeyMap.put(key, omKeyInfo); } + } else { + deletedKeySet.add(key); + } + } + + // Get maxKeys from DB if it has. + + try (TableIterator> + keyIter = getKeyTable().iterator()) { + KeyValue< String, OmKeyInfo > kv; + keyIter.seek(seekKey); + // we need to iterate maxKeys + 1 here because if skipStartKey is true, + // we should skip that entry and return the result. + while (currentCount < maxKeys + 1 && keyIter.hasNext()) { + kv = keyIter.next(); if (kv != null && kv.getKey().startsWith(seekPrefix)) { - result.add(kv.getValue()); - currentCount++; + + // Entry should not be marked for delete, consider only those + // entries. + if(!deletedKeySet.contains(kv.getKey())) { + cacheKeyMap.put(kv.getKey(), kv.getValue()); + currentCount++; + } } else { // The SeekPrefix does not match any more, we can break out of the // loop. @@ -700,6 +753,28 @@ public List listKeys(String volumeName, String bucketName, } } } + + // Finally DB entries and cache entries are merged, then return the count + // of maxKeys from the sorted map. 
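The listKeys rewrite above merges the in-memory table cache with the RocksDB iterator: cache entries whose value is null act as delete tombstones, both sources are collected into a TreeMap sorted by key name, and at most maxKeys entries are returned. A simplified, self-contained sketch of that merge (plain Java collections; the method name and the String value type are illustrative stand-ins, not the OmKeyInfo/Table API):

    import java.util.*;

    final class ListMergeSketch {
      // cache: newest state, null value == pending delete (tombstone).
      // dbEntries: already-sorted rows read from the DB at or after startKey.
      static List<String> list(Map<String, String> cache,
                               List<Map.Entry<String, String>> dbEntries,
                               String prefix, String startKey, int maxKeys) {
        TreeMap<String, String> merged = new TreeMap<>();
        Set<String> deleted = new HashSet<>();

        for (Map.Entry<String, String> e : cache.entrySet()) {
          if (e.getValue() == null) {
            deleted.add(e.getKey());                    // marked for delete
          } else if (e.getKey().startsWith(prefix)
              && e.getKey().compareTo(startKey) >= 0) {
            merged.put(e.getKey(), e.getValue());       // cache wins over DB
          }
        }
        for (Map.Entry<String, String> e : dbEntries) {
          if (e.getKey().startsWith(prefix) && !deleted.contains(e.getKey())) {
            merged.putIfAbsent(e.getKey(), e.getValue());
          }
        }

        List<String> result = new ArrayList<>();
        for (Map.Entry<String, String> e : merged.entrySet()) {
          if (e.getKey().equals(startKey)) {
            continue;                                   // skip the start key itself
          }
          result.add(e.getValue());
          if (result.size() == maxKeys) {
            break;
          }
        }
        return result;
      }
    }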
+ currentCount = 0; + + for (Map.Entry cacheKey : cacheKeyMap.entrySet()) { + if (cacheKey.getKey().equals(seekKey) && skipStartKey) { + continue; + } + + result.add(cacheKey.getValue()); + currentCount++; + + if (currentCount == maxKeys) { + break; + } + } + + // Clear map and set. + cacheKeyMap.clear(); + deletedKeySet.clear(); + return result; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OpenKeyCleanupService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OpenKeyCleanupService.java index fa4be651dae6..79bc39f49846 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OpenKeyCleanupService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OpenKeyCleanupService.java @@ -88,7 +88,9 @@ public BackgroundTaskResult call() throws Exception { if (result.isSuccess()) { try { keyManager.deleteExpiredOpenKey(result.getObjectKey()); - LOG.debug("Key {} deleted from OM DB", result.getObjectKey()); + if (LOG.isDebugEnabled()) { + LOG.debug("Key {} deleted from OM DB", result.getObjectKey()); + } deletedSize += 1; } catch (IOException e) { LOG.warn("Failed to delete hanging-open key {}", diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index a6503d73140a..0cd087eee236 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -734,10 +734,12 @@ private static void loginOMUser(OzoneConfiguration conf) if (SecurityUtil.getAuthenticationMethod(conf).equals( AuthenticationMethod.KERBEROS)) { - LOG.debug("Ozone security is enabled. Attempting login for OM user. " - + "Principal: {},keytab: {}", conf.get( - OZONE_OM_KERBEROS_PRINCIPAL_KEY), - conf.get(OZONE_OM_KERBEROS_KEYTAB_FILE_KEY)); + if (LOG.isDebugEnabled()) { + LOG.debug("Ozone security is enabled. Attempting login for OM user. " + + "Principal: {}, keytab: {}", conf.get( + OZONE_OM_KERBEROS_PRINCIPAL_KEY), + conf.get(OZONE_OM_KERBEROS_KEYTAB_FILE_KEY)); + } UserGroupInformation.setConfiguration(conf); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java index 0eafff9dcbd9..c89b32ee7347 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java @@ -139,7 +139,10 @@ public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException { OMPrefixAclOpResult omPrefixAclOpResult = removeAcl(obj, acl, prefixInfo); if (!omPrefixAclOpResult.isOperationsResult()) { - LOG.debug("acl {} does not exist for prefix path {} ", acl, prefixPath); + if (LOG.isDebugEnabled()) { + LOG.debug("acl {} does not exist for prefix path {} ", + acl, prefixPath); + } return false; } @@ -236,8 +239,10 @@ public boolean checkAccess(OzoneObj ozObject, RequestContext context) if (lastNode != null && lastNode.getValue() != null) { boolean hasAccess = OzoneAclUtil.checkAclRights(lastNode.getValue(). 
getAcls(), context); - LOG.debug("user:{} has access rights for ozObj:{} ::{} ", - context.getClientUgi(), ozObject, hasAccess); + if (LOG.isDebugEnabled()) { + LOG.debug("user:{} has access rights for ozObj:{} ::{} ", + context.getClientUgi(), ozObject, hasAccess); + } return hasAccess; } else { return true; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java index 4ea8529e2d31..7375eb89b26d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java @@ -108,7 +108,7 @@ private UserVolumeInfo delVolumeFromOwnerList(String volume, String owner) if (volumeList != null) { prevVolList.addAll(volumeList.getVolumeNamesList()); } else { - LOG.debug("volume:{} not found for user:{}"); + LOG.debug("volume:{} not found for user:{}", volume, owner); throw new OMException(ResultCodes.USER_NOT_FOUND); } @@ -319,7 +319,7 @@ public void setQuota(String volume, long quota) throws IOException { @Override public OmVolumeArgs getVolumeInfo(String volume) throws IOException { Preconditions.checkNotNull(volume); - metadataManager.getLock().acquireLock(VOLUME_LOCK, volume); + metadataManager.getLock().acquireReadLock(VOLUME_LOCK, volume); try { String dbVolumeKey = metadataManager.getVolumeKey(volume); OmVolumeArgs volumeArgs = @@ -337,7 +337,7 @@ public OmVolumeArgs getVolumeInfo(String volume) throws IOException { } throw ex; } finally { - metadataManager.getLock().releaseLock(VOLUME_LOCK, volume); + metadataManager.getLock().releaseReadLock(VOLUME_LOCK, volume); } } @@ -423,7 +423,7 @@ public boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl) throws IOException { Preconditions.checkNotNull(volume); Preconditions.checkNotNull(userAcl); - metadataManager.getLock().acquireLock(VOLUME_LOCK, volume); + metadataManager.getLock().acquireReadLock(VOLUME_LOCK, volume); try { String dbVolumeKey = metadataManager.getVolumeKey(volume); OmVolumeArgs volumeArgs = @@ -443,7 +443,7 @@ public boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl) } throw ex; } finally { - metadataManager.getLock().releaseLock(VOLUME_LOCK, volume); + metadataManager.getLock().releaseReadLock(VOLUME_LOCK, volume); } } @@ -503,7 +503,9 @@ public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException { try { volumeArgs.addAcl(acl); } catch (OMException ex) { - LOG.debug("Add acl failed.", ex); + if (LOG.isDebugEnabled()) { + LOG.debug("Add acl failed.", ex); + } return false; } metadataManager.getVolumeTable().put(dbVolumeKey, volumeArgs); @@ -553,7 +555,9 @@ public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException { try { volumeArgs.removeAcl(acl); } catch (OMException ex) { - LOG.debug("Remove acl failed.", ex); + if (LOG.isDebugEnabled()) { + LOG.debug("Remove acl failed.", ex); + } return false; } metadataManager.getVolumeTable().put(dbVolumeKey, volumeArgs); @@ -634,7 +638,7 @@ public List getAcl(OzoneObj obj) throws IOException { "VolumeManager. 
OzoneObj type:" + obj.getResourceType()); } String volume = obj.getVolumeName(); - metadataManager.getLock().acquireLock(VOLUME_LOCK, volume); + metadataManager.getLock().acquireReadLock(VOLUME_LOCK, volume); try { String dbVolumeKey = metadataManager.getVolumeKey(volume); OmVolumeArgs volumeArgs = @@ -653,7 +657,7 @@ public List getAcl(OzoneObj obj) throws IOException { } throw ex; } finally { - metadataManager.getLock().releaseLock(VOLUME_LOCK, volume); + metadataManager.getLock().releaseReadLock(VOLUME_LOCK, volume); } } @@ -685,8 +689,10 @@ public boolean checkAccess(OzoneObj ozObject, RequestContext context) Preconditions.checkState(volume.equals(volumeArgs.getVolume())); boolean hasAccess = volumeArgs.getAclMap().hasAccess( context.getAclRights(), context.getClientUgi()); - LOG.debug("user:{} has access rights for volume:{} :{} ", - context.getClientUgi(), ozObject.getVolumeName(), hasAccess); + if (LOG.isDebugEnabled()) { + LOG.debug("user:{} has access rights for volume:{} :{} ", + context.getClientUgi(), ozObject.getVolumeName(), hasAccess); + } return hasAccess; } catch (IOException ex) { LOG.error("Check access operation failed for volume:{}", volume, ex); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java index b4f5b8d98fc9..e5cadffc4009 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java @@ -148,9 +148,11 @@ private void flushTransactions() { flushedTransactionCount.addAndGet(flushedTransactionsSize); flushIterations.incrementAndGet(); - LOG.debug("Sync Iteration {} flushed transactions in this " + - "iteration{}", flushIterations.get(), - flushedTransactionsSize); + if (LOG.isDebugEnabled()) { + LOG.debug("Sync Iteration {} flushed transactions in this " + + "iteration{}", flushIterations.get(), + flushedTransactionsSize); + } long lastRatisTransactionIndex = readyBuffer.stream().map(DoubleBufferEntry::getTrxLogIndex) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisClient.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisClient.java index 2cbef50cb049..6f97f56241b0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisClient.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisClient.java @@ -99,8 +99,10 @@ public static OzoneManagerRatisClient newOzoneManagerRatisClient( } public void connect() { - LOG.debug("Connecting to OM Ratis Server GroupId:{} OM:{}", - raftGroup.getGroupId().getUuid().toString(), omNodeID); + if (LOG.isDebugEnabled()) { + LOG.debug("Connecting to OM Ratis Server GroupId:{} OM:{}", + raftGroup.getGroupId().getUuid().toString(), omNodeID); + } // TODO : XceiverClient ratis should pass the config value of // maxOutstandingRequests so as to set the upper bound on max no of async @@ -147,8 +149,7 @@ private OzoneManagerProtocolProtos.Status parseErrorStatus(String message) { if (message.contains(STATUS_CODE)) { String errorCode = message.substring(message.indexOf(STATUS_CODE) + STATUS_CODE.length()); - LOG.debug("Parsing error message for error code " + - errorCode); + LOG.debug("Parsing error message for error code 
{}", errorCode); return OzoneManagerProtocolProtos.Status.valueOf(errorCode.trim()); } else { return OzoneManagerProtocolProtos.Status.INTERNAL_ERROR; @@ -166,25 +167,27 @@ private CompletableFuture sendCommandAsync(OMRequest request) { CompletableFuture raftClientReply = sendRequestAsync(request); - return raftClientReply.whenComplete((reply, e) -> LOG.debug( - "received reply {} for request: cmdType={} traceID={} " + - "exception: {}", reply, request.getCmdType(), - request.getTraceID(), e)) - .thenApply(reply -> { - try { - Preconditions.checkNotNull(reply); - if (!reply.isSuccess()) { - RaftException exception = reply.getException(); - Preconditions.checkNotNull(exception, "Raft reply failure " + - "but no exception propagated."); - throw new CompletionException(exception); - } - return OMRatisHelper.getOMResponseFromRaftClientReply(reply); - - } catch (InvalidProtocolBufferException e) { - throw new CompletionException(e); - } - }); + return raftClientReply.whenComplete((reply, e) -> { + if (LOG.isDebugEnabled()) { + LOG.debug("received reply {} for request: cmdType={} traceID={} " + + "exception: {}", reply, request.getCmdType(), + request.getTraceID(), e); + } + }).thenApply(reply -> { + try { + Preconditions.checkNotNull(reply); + if (!reply.isSuccess()) { + RaftException exception = reply.getException(); + Preconditions.checkNotNull(exception, "Raft reply failure " + + "but no exception propagated."); + throw new CompletionException(exception); + } + return OMRatisHelper.getOMResponseFromRaftClientReply(reply); + + } catch (InvalidProtocolBufferException e) { + throw new CompletionException(e); + } + }); } /** @@ -198,7 +201,9 @@ private CompletableFuture sendRequestAsync( OMRequest request) { boolean isReadOnlyRequest = OmUtils.isReadOnly(request); ByteString byteString = OMRatisHelper.convertRequestToByteString(request); - LOG.debug("sendOMRequestAsync {} {}", isReadOnlyRequest, request); + if (LOG.isDebugEnabled()) { + LOG.debug("sendOMRequestAsync {} {}", isReadOnlyRequest, request); + } return isReadOnlyRequest ? raftClient.sendReadOnlyAsync(() -> byteString) : raftClient.sendAsync(() -> byteString); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java index 69a7ae93a81a..7cab9d2738ab 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java @@ -169,8 +169,10 @@ private OMResponse processReply(OMRequest omRequest, RaftClientReply reply) omResponse.setMessage(stateMachineException.getCause().getMessage()); omResponse.setStatus(parseErrorStatus( stateMachineException.getCause().getMessage())); - LOG.debug("Error while executing ratis request. " + - "stateMachineException: ", stateMachineException); + if (LOG.isDebugEnabled()) { + LOG.debug("Error while executing ratis request. 
" + + "stateMachineException: ", stateMachineException); + } return omResponse.build(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java index 06ebcc5913ea..2b2448db770f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java @@ -148,10 +148,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, volumeName, bucketName, null); } - acquiredVolumeLock = metadataManager.getLock().acquireLock(VOLUME_LOCK, - volumeName); - acquiredBucketLock = metadataManager.getLock().acquireLock(BUCKET_LOCK, - volumeName, bucketName); + acquiredVolumeLock = + metadataManager.getLock().acquireReadLock(VOLUME_LOCK, volumeName); + acquiredBucketLock = metadataManager.getLock().acquireWriteLock( + BUCKET_LOCK, volumeName, bucketName); OmVolumeArgs omVolumeArgs = metadataManager.getVolumeTable().get(volumeKey); @@ -191,11 +191,11 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, transactionLogIndex)); } if (acquiredBucketLock) { - metadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName, + metadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, bucketName); } if (acquiredVolumeLock) { - metadataManager.getLock().releaseLock(VOLUME_LOCK, volumeName); + metadataManager.getLock().releaseReadLock(VOLUME_LOCK, volumeName); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java index 568c939222b1..9469f887e18a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java @@ -50,6 +50,7 @@ import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK; /** * Handles DeleteBucket Request. 
@@ -87,7 +88,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo(); IOException exception = null; - boolean acquiredLock = false; + boolean acquiredBucketLock = false; + boolean acquiredVolumeLock = false; OMClientResponse omClientResponse = null; try { // check Acl @@ -99,7 +101,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, // acquire lock - acquiredLock = omMetadataManager.getLock().acquireLock(BUCKET_LOCK, + acquiredVolumeLock = + omMetadataManager.getLock().acquireReadLock(VOLUME_LOCK, volumeName); + acquiredBucketLock = + omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, bucketName); // No need to check volume exists here, as bucket cannot be created @@ -142,10 +147,13 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ozoneManagerDoubleBufferHelper.add(omClientResponse, transactionLogIndex)); } - if (acquiredLock) { - omMetadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName, + if (acquiredBucketLock) { + omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, bucketName); } + if (acquiredVolumeLock) { + omMetadataManager.getLock().releaseReadLock(VOLUME_LOCK, volumeName); + } } // Performing audit logging outside of the lock. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java index f8b91e9ee92a..6c5f5fa4146c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java @@ -102,7 +102,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, AuditLogger auditLogger = ozoneManager.getAuditLogger(); OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo(); IOException exception = null; - boolean acquiredLock = false; + boolean acquiredBucketLock = false; OMClientResponse omClientResponse = null; try { // check Acl @@ -112,10 +112,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, volumeName, bucketName, null); } - - // acquire lock - acquiredLock = omMetadataManager.getLock().acquireLock(BUCKET_LOCK, - volumeName, bucketName); + // acquire lock. 
+ acquiredBucketLock = omMetadataManager.getLock().acquireWriteLock( + BUCKET_LOCK, volumeName, bucketName); String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); OmBucketInfo oldBucketInfo = @@ -181,8 +180,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ozoneManagerDoubleBufferHelper.add(omClientResponse, transactionLogIndex)); } - if (acquiredLock) { - omMetadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName, + if (acquiredBucketLock) { + omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, bucketName); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java index 33255ebdd6f6..87ad6000bc54 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java @@ -91,7 +91,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, volume, null, null); } lockAcquired = - omMetadataManager.getLock().acquireLock(BUCKET_LOCK, volume, bucket); + omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volume, + bucket); String dbBucketKey = omMetadataManager.getBucketKey(volume, bucket); omBucketInfo = omMetadataManager.getBucketTable().get(dbBucketKey); @@ -120,7 +121,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, transactionLogIndex)); } if (lockAcquired) { - omMetadataManager.getLock().releaseLock(BUCKET_LOCK, volume, bucket); + omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volume, + bucket); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java index 46db75df17cf..b97de955a51a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java @@ -103,7 +103,9 @@ OMClientResponse onFailure(OMResponse.Builder omResponse, void onComplete(boolean operationResult, IOException exception, OMMetrics omMetrics) { if (operationResult) { - LOG.debug("Set acl: {} for path: {} success!", getAcls(), getPath()); + if (LOG.isDebugEnabled()) { + LOG.debug("Set acl: {} for path: {} success!", getAcls(), getPath()); + } } else { omMetrics.incNumBucketUpdateFails(); if (exception == null) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java index 1c3943379f6a..4b591dbed2d5 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java @@ -136,7 +136,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, CreateDirectoryResponse.newBuilder()).build()); } // acquire lock - acquiredLock = omMetadataManager.getLock().acquireLock(BUCKET_LOCK, + acquiredLock = 
omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, bucketName); // TODO: Not checking volume exist here, once we have full cache we can @@ -192,7 +192,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, transactionLogIndex)); } if (acquiredLock) { - omMetadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName, + omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, bucketName); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java index b51a4d69e227..20b51747caaa 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java @@ -180,7 +180,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, checkBucketAcls(ozoneManager, volumeName, bucketName, keyName); // acquire lock - acquiredLock = omMetadataManager.getLock().acquireLock(BUCKET_LOCK, + acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, bucketName); OmBucketInfo bucketInfo = @@ -280,7 +280,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, transactionLogIndex)); } if (acquiredLock) { - omMetadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName, + omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, bucketName); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java index 69e54057f990..196d61c15d04 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java @@ -127,7 +127,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, String dbOpenKey = omMetadataManager.getOpenKey(volumeName, bucketName, keyName, commitKeyRequest.getClientID()); - omMetadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName, + omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, bucketName); validateBucketAndVolume(omMetadataManager, volumeName, bucketName); @@ -166,7 +166,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ozoneManagerDoubleBufferHelper.add(omClientResponse, transactionLogIndex)); } - omMetadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName, + omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, bucketName); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java index 25966468f208..baa13ad87fe7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java @@ -164,7 +164,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, // check Acl checkBucketAcls(ozoneManager, volumeName, bucketName, keyName); - acquireLock = 
omMetadataManager.getLock().acquireLock(BUCKET_LOCK, + acquireLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, bucketName); validateBucketAndVolume(omMetadataManager, volumeName, bucketName); //TODO: We can optimize this get here, if getKmsProvider is null, then @@ -198,7 +198,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, transactionLogIndex)); } if (acquireLock) { - omMetadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName, + omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, bucketName); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java index eb366adfcf9d..ee4b9b2dc0dd 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java @@ -114,7 +114,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, String objectKey = omMetadataManager.getOzoneKey( volumeName, bucketName, keyName); - acquiredLock = omMetadataManager.getLock().acquireLock(BUCKET_LOCK, + acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, bucketName); // Not doing bucket/volume checks here. In this way we can avoid db @@ -153,7 +153,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, transactionLogIndex)); } if (acquiredLock) { - omMetadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName, + omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, bucketName); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java index eb8a59e50c7e..526473c23997 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java @@ -120,7 +120,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, // check Acl checkKeyAcls(ozoneManager, volumeName, bucketName, fromKeyName); - acquiredLock = omMetadataManager.getLock().acquireLock(BUCKET_LOCK, + acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, bucketName); // Not doing bucket/volume checks here. 
In this way we can avoid db @@ -176,7 +176,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, transactionLogIndex)); } if (acquiredLock) { - omMetadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName, + omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, bucketName); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java index a022bc165293..d1fac4feb74d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java @@ -81,7 +81,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, volume, bucket, key); } lockAcquired = - omMetadataManager.getLock().acquireLock(BUCKET_LOCK, volume, bucket); + omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volume, + bucket); String dbKey = omMetadataManager.getOzoneKey(volume, bucket, key); omKeyInfo = omMetadataManager.getKeyTable().get(dbKey); @@ -111,7 +112,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, transactionLogIndex)); } if (lockAcquired) { - omMetadataManager.getLock().releaseLock(BUCKET_LOCK, volume, bucket); + omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volume, + bucket); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java index 87404861a77a..3b30e4a3f19d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java @@ -81,7 +81,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, } lockAcquired = - omMetadataManager.getLock().acquireLock(PREFIX_LOCK, prefixPath); + omMetadataManager.getLock().acquireWriteLock(PREFIX_LOCK, prefixPath); omPrefixInfo = omMetadataManager.getPrefixTable().get(prefixPath); @@ -128,7 +128,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, transactionLogIndex)); } if (lockAcquired) { - omMetadataManager.getLock().releaseLock(PREFIX_LOCK, + omMetadataManager.getLock().releaseWriteLock(PREFIX_LOCK, getOzoneObj().getPath()); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/bucket/S3BucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/bucket/S3BucketCreateRequest.java index 7a7091d1a141..f3a352a2fbf3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/bucket/S3BucketCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/bucket/S3BucketCreateRequest.java @@ -153,8 +153,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, try { // TODO to support S3 ACL later. 
- acquiredS3Lock = omMetadataManager.getLock().acquireLock(S3_BUCKET_LOCK, - s3BucketName); + acquiredS3Lock = omMetadataManager.getLock().acquireWriteLock( + S3_BUCKET_LOCK, s3BucketName); // First check if this s3Bucket exists if (omMetadataManager.getS3Table().isExist(s3BucketName)) { @@ -165,9 +165,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OMVolumeCreateResponse omVolumeCreateResponse = null; try { acquiredVolumeLock = - omMetadataManager.getLock().acquireLock(VOLUME_LOCK, volumeName); - acquiredUserLock = omMetadataManager.getLock().acquireLock(USER_LOCK, - userName); + omMetadataManager.getLock().acquireWriteLock(VOLUME_LOCK, + volumeName); + acquiredUserLock = omMetadataManager.getLock().acquireWriteLock( + USER_LOCK, userName); // Check if volume exists, if it does not exist create // ozone volume. String volumeKey = omMetadataManager.getVolumeKey(volumeName); @@ -188,10 +189,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, } } finally { if (acquiredUserLock) { - omMetadataManager.getLock().releaseLock(USER_LOCK, userName); + omMetadataManager.getLock().releaseWriteLock(USER_LOCK, userName); } if (acquiredVolumeLock) { - omMetadataManager.getLock().releaseLock(VOLUME_LOCK, volumeName); + omMetadataManager.getLock().releaseWriteLock(VOLUME_LOCK, volumeName); } } @@ -227,7 +228,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, transactionLogIndex)); } if (acquiredS3Lock) { - omMetadataManager.getLock().releaseLock(S3_BUCKET_LOCK, s3BucketName); + omMetadataManager.getLock().releaseWriteLock( + S3_BUCKET_LOCK, s3BucketName); } } @@ -266,7 +268,7 @@ private OmBucketInfo createBucket(OMMetadataManager omMetadataManager, OmBucketInfo omBucketInfo = null; try { acquireBucketLock = - omMetadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName, + omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, s3BucketName); String bucketKey = omMetadataManager.getBucketKey(volumeName, s3BucketName); @@ -285,7 +287,7 @@ private OmBucketInfo createBucket(OMMetadataManager omMetadataManager, } } finally { if (acquireBucketLock) { - omMetadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName, + omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, s3BucketName); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/bucket/S3BucketDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/bucket/S3BucketDeleteRequest.java index 9f37828ab72a..5d5932ff3f59 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/bucket/S3BucketDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/bucket/S3BucketDeleteRequest.java @@ -106,8 +106,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OMClientResponse omClientResponse = null; try { // TODO to support S3 ACL later. 
- acquiredS3Lock = omMetadataManager.getLock().acquireLock(S3_BUCKET_LOCK, - s3BucketName); + acquiredS3Lock = omMetadataManager.getLock().acquireWriteLock( + S3_BUCKET_LOCK, s3BucketName); String s3Mapping = omMetadataManager.getS3Table().get(s3BucketName); @@ -118,8 +118,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, volumeName = getOzoneVolumeName(s3Mapping); acquiredBucketLock = - omMetadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName, - s3BucketName); + omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, + volumeName, s3BucketName); String bucketKey = omMetadataManager.getBucketKey(volumeName, s3BucketName); @@ -149,11 +149,12 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, transactionLogIndex)); } if (acquiredBucketLock) { - omMetadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName, + omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, s3BucketName); } if (acquiredS3Lock) { - omMetadataManager.getLock().releaseLock(S3_BUCKET_LOCK, s3BucketName); + omMetadataManager.getLock().releaseWriteLock(S3_BUCKET_LOCK, + s3BucketName); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java index 2fe2c65b6a64..df0e168e2e0f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java @@ -114,7 +114,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, try { // TODO to support S3 ACL later. acquiredBucketLock = - omMetadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName, + omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, bucketName); validateBucketAndVolume(omMetadataManager, volumeName, bucketName); @@ -189,7 +189,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, transactionLogIndex)); } if (acquiredBucketLock) { - omMetadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName, + omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, bucketName); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java index f176879a095d..b65328d325a4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java @@ -99,7 +99,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, try { // TODO to support S3 ACL later. 
acquiredLock = - omMetadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName, + omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, bucketName); validateBucketAndVolume(omMetadataManager, volumeName, bucketName); @@ -147,7 +147,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, transactionLogIndex)); } if (acquiredLock) { - omMetadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName, + omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, bucketName); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java index 0992fe0980d1..cf7db655a020 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java @@ -111,7 +111,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, try { // TODO to support S3 ACL later. acquiredLock = - omMetadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName, + omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, bucketName); validateBucketAndVolume(omMetadataManager, volumeName, bucketName); @@ -203,7 +203,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, transactionLogIndex)); } if (acquiredLock) { - omMetadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName, + omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, bucketName); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java index 60f943497c96..ace2dbc4f131 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java @@ -124,7 +124,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, multipartUploadList = new OmMultipartUploadCompleteList(partsMap); - acquiredLock = omMetadataManager.getLock().acquireLock(BUCKET_LOCK, + acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, bucketName); validateBucketAndVolume(omMetadataManager, volumeName, bucketName); @@ -270,7 +270,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, transactionLogIndex)); } if (acquiredLock) { - omMetadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName, + omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, bucketName); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3GetSecretRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3GetSecretRequest.java index 039ac0e1cb5b..d8f6478576a6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3GetSecretRequest.java +++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3GetSecretRequest.java @@ -127,7 +127,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, try { String awsSecret = updateGetS3SecretRequest.getAwsSecret(); acquiredLock = - omMetadataManager.getLock().acquireLock(S3_SECRET_LOCK, kerberosID); + omMetadataManager.getLock().acquireWriteLock(S3_SECRET_LOCK, + kerberosID); S3SecretValue s3SecretValue = omMetadataManager.getS3SecretTable().get(kerberosID); @@ -168,7 +169,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, omClientResponse, transactionLogIndex)); } if (acquiredLock) { - omMetadataManager.getLock().releaseLock(S3_SECRET_LOCK, kerberosID); + omMetadataManager.getLock().releaseWriteLock(S3_SECRET_LOCK, + kerberosID); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java index c06069c2567a..69da19f244b0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java @@ -135,10 +135,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, UserVolumeInfo volumeList = null; // acquire lock. - acquiredVolumeLock = omMetadataManager.getLock().acquireLock(VOLUME_LOCK, - volume); + acquiredVolumeLock = omMetadataManager.getLock().acquireWriteLock( + VOLUME_LOCK, volume); - acquiredUserLock = omMetadataManager.getLock().acquireLock(USER_LOCK, + acquiredUserLock = omMetadataManager.getLock().acquireWriteLock(USER_LOCK, owner); String dbVolumeKey = omMetadataManager.getVolumeKey(volume); @@ -176,10 +176,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, transactionLogIndex)); } if (acquiredUserLock) { - omMetadataManager.getLock().releaseLock(USER_LOCK, owner); + omMetadataManager.getLock().releaseWriteLock(USER_LOCK, owner); } if (acquiredVolumeLock) { - omMetadataManager.getLock().releaseLock(VOLUME_LOCK, volume); + omMetadataManager.getLock().releaseWriteLock(VOLUME_LOCK, volume); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java index 485536fc3287..f91b02d21f8b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java @@ -97,10 +97,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OmVolumeArgs omVolumeArgs = null; OzoneManagerProtocolProtos.UserVolumeInfo newVolumeList = null; - acquiredVolumeLock = omMetadataManager.getLock().acquireLock(VOLUME_LOCK, - volume); + acquiredVolumeLock = omMetadataManager.getLock().acquireWriteLock( + VOLUME_LOCK, volume); owner = getVolumeInfo(omMetadataManager, volume).getOwnerName(); - acquiredUserLock = omMetadataManager.getLock().acquireLock(USER_LOCK, + acquiredUserLock = omMetadataManager.getLock().acquireWriteLock(USER_LOCK, owner); String dbUserKey = omMetadataManager.getUserKey(owner); @@ -141,10 +141,10 @@ public OMClientResponse 
validateAndUpdateCache(OzoneManager ozoneManager, transactionLogIndex)); } if (acquiredUserLock) { - omMetadataManager.getLock().releaseLock(USER_LOCK, owner); + omMetadataManager.getLock().releaseWriteLock(USER_LOCK, owner); } if (acquiredVolumeLock) { - omMetadataManager.getLock().releaseLock(VOLUME_LOCK, volume); + omMetadataManager.getLock().releaseWriteLock(VOLUME_LOCK, volume); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java index 129b2f9fdd8e..d1f1e8bfe4b9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java @@ -123,8 +123,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, - acquiredVolumeLock = omMetadataManager.getLock().acquireLock(VOLUME_LOCK, - volume); + acquiredVolumeLock = omMetadataManager.getLock().acquireWriteLock( + VOLUME_LOCK, volume); omVolumeArgs = omMetadataManager.getVolumeTable().get(dbVolumeKey); @@ -188,7 +188,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, omMetadataManager.getLock().releaseMultiUserLock(newOwner, oldOwner); } if (acquiredVolumeLock) { - omMetadataManager.getLock().releaseLock(VOLUME_LOCK, volume); + omMetadataManager.getLock().releaseWriteLock(VOLUME_LOCK, volume); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java index 7826c97f2966..ef6d8ae01664 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java @@ -114,8 +114,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OmVolumeArgs omVolumeArgs = null; - acquireVolumeLock = omMetadataManager.getLock().acquireLock(VOLUME_LOCK, - volume); + acquireVolumeLock = omMetadataManager.getLock().acquireWriteLock( + VOLUME_LOCK, volume); String dbVolumeKey = omMetadataManager.getVolumeKey(volume); omVolumeArgs = omMetadataManager.getVolumeTable().get(dbVolumeKey); @@ -146,7 +146,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, transactionLogIndex)); } if (acquireVolumeLock) { - omMetadataManager.getLock().releaseLock(VOLUME_LOCK, volume); + omMetadataManager.getLock().releaseWriteLock(VOLUME_LOCK, volume); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java index 89ca8daafffa..6b4dc75e8208 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java @@ -81,7 +81,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, volume, null, null); } lockAcquired = - omMetadataManager.getLock().acquireLock(VOLUME_LOCK, volume); + 
omMetadataManager.getLock().acquireWriteLock(VOLUME_LOCK, volume); String dbVolumeKey = omMetadataManager.getVolumeKey(volume); omVolumeArgs = omMetadataManager.getVolumeTable().get(dbVolumeKey); if (omVolumeArgs == null) { @@ -115,7 +115,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, transactionLogIndex)); } if (lockAcquired) { - omMetadataManager.getLock().releaseLock(VOLUME_LOCK, volume); + omMetadataManager.getLock().releaseWriteLock(VOLUME_LOCK, volume); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java index 01b5edc8d554..a5abbcca012a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java @@ -96,8 +96,10 @@ OMClientResponse onFailure(OMResponse.Builder omResponse, @Override void onComplete(IOException ex) { if (ex == null) { - LOG.debug("Set acls: {} to volume: {} success!", - getAcls(), getVolumeName()); + if (LOG.isDebugEnabled()) { + LOG.debug("Set acls: {} to volume: {} success!", + getAcls(), getVolumeName()); + } } else { LOG.error("Set acls {} to volume {} failed!", getAcls(), getVolumeName(), ex); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerHARequestHandlerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerHARequestHandlerImpl.java index 66f489233417..2d305d7831a3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerHARequestHandlerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerHARequestHandlerImpl.java @@ -48,7 +48,9 @@ public OzoneManagerHARequestHandlerImpl(OzoneManager om, @Override public OMResponse handleApplyTransaction(OMRequest omRequest, long transactionLogIndex) { - LOG.debug("Received OMRequest: {}, ", omRequest); + if (LOG.isDebugEnabled()) { + LOG.debug("Received OMRequest: {}, ", omRequest); + } Type cmdType = omRequest.getCmdType(); switch (cmdType) { case CreateVolume: diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java index d4c029b8b3b9..ff2c966983f4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java @@ -225,7 +225,9 @@ private OMResponse submitRequestDirectlyToOM(OMRequest request) { } try { omClientResponse.getFlushFuture().get(); - LOG.trace("Future for {} is completed", request); + if (LOG.isTraceEnabled()) { + LOG.trace("Future for {} is completed", request); + } } catch (ExecutionException | InterruptedException ex) { // terminate OM. As if we are in this stage means, while getting // response from flush future, we got an exception. 
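Note: the hunks above repeatedly replace the generic acquireLock()/releaseLock() calls with the explicit acquireWriteLock()/releaseWriteLock() pair, always taking the lock inside the try block and releasing it in the finally block behind a boolean flag. Below is a minimal, self-contained sketch of that discipline; it is illustrative only and uses a plain java.util.concurrent ReentrantReadWriteLock rather than the real OzoneManagerLock API, with made-up class and method names.

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    /**
     * Illustrative sketch only (not part of the patch): mirrors the
     * acquire-write-lock-in-try / release-in-finally discipline used by
     * the request classes above.
     */
    public class BucketWriteLockSketch {

      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

      public void updateBucketMetadata(String volumeName, String bucketName) {
        boolean acquiredLock = false;
        try {
          // Take the write lock before mutating metadata for
          // volumeName/bucketName.
          lock.writeLock().lock();
          acquiredLock = true;

          // ... validate the bucket/volume and apply the update here ...

        } finally {
          // Release only if the lock was actually taken, mirroring the
          // boolean-flag guard around releaseWriteLock() in the patch.
          if (acquiredLock) {
            lock.writeLock().unlock();
          }
        }
      }
    }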
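Note: the logging changes above wrap parameterized LOG.debug()/LOG.trace() calls in isDebugEnabled()/isTraceEnabled() guards so that, when the level is disabled, the statement and any argument-building work are skipped entirely. A small illustrative sketch of the idiom follows; the class and method names are made up for the example.

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    /**
     * Illustrative sketch only: the guarded-logging idiom applied
     * throughout the handlers above.
     */
    public final class GuardedLoggingSketch {

      private static final Logger LOG =
          LoggerFactory.getLogger(GuardedLoggingSketch.class);

      private GuardedLoggingSketch() {
      }

      public static void handle(Object request) {
        if (LOG.isDebugEnabled()) {
          // Only reached when debug logging is enabled; otherwise the
          // call and its arguments are never evaluated.
          LOG.debug("Received request: {}", request);
        }
      }
    }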
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java index 01e59b4fea8b..ef96e0cc27ec 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java @@ -149,7 +149,9 @@ public OzoneManagerRequestHandler(OzoneManager om) { @SuppressWarnings("methodlength") @Override public OMResponse handle(OMRequest request) { - LOG.debug("Received OMRequest: {}, ", request); + if (LOG.isDebugEnabled()) { + LOG.debug("Received OMRequest: {}, ", request); + } Type cmdType = request.getCmdType(); OMResponse.Builder responseBuilder = OMResponse.newBuilder() .setCmdType(cmdType) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneNativeAuthorizer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneNativeAuthorizer.java index 5acd37e09c8c..0b7c51a40640 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneNativeAuthorizer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneNativeAuthorizer.java @@ -79,20 +79,20 @@ public boolean checkAccess(IOzoneObj ozObject, RequestContext context) switch (objInfo.getResourceType()) { case VOLUME: - LOG.trace("Checking access for volume:" + objInfo); + LOG.trace("Checking access for volume: {}", objInfo); return volumeManager.checkAccess(objInfo, context); case BUCKET: - LOG.trace("Checking access for bucket:" + objInfo); + LOG.trace("Checking access for bucket: {}", objInfo); return (bucketManager.checkAccess(objInfo, context) && volumeManager.checkAccess(objInfo, context)); case KEY: - LOG.trace("Checking access for Key:" + objInfo); + LOG.trace("Checking access for Key: {}", objInfo); return (keyManager.checkAccess(objInfo, context) && prefixManager.checkAccess(objInfo, context) && bucketManager.checkAccess(objInfo, context) && volumeManager.checkAccess(objInfo, context)); case PREFIX: - LOG.trace("Checking access for Prefix:" + objInfo); + LOG.trace("Checking access for Prefix: {}", objInfo); return (prefixManager.checkAccess(objInfo, context) && bucketManager.checkAccess(objInfo, context) && volumeManager.checkAccess(objInfo, context)); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/ObjectPrinter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/ObjectPrinter.java index 064d30ad1c20..2a17275a5667 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/ObjectPrinter.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/ObjectPrinter.java @@ -29,8 +29,7 @@ private ObjectPrinter() { } public static String getObjectAsJson(Object o) throws IOException { - return JsonUtils.toJsonStringWithDefaultPrettyPrinter( - JsonUtils.toJsonString(o)); + return JsonUtils.toJsonStringWithDefaultPrettyPrinter(o); } public static void printObjectAsJson(Object o) throws IOException { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/AddAclBucketHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/AddAclBucketHandler.java index 6b32f6400bd1..112e8f38079d
100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/AddAclBucketHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/AddAclBucketHandler.java @@ -24,7 +24,6 @@ import org.apache.hadoop.ozone.web.ozShell.Handler; import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; import org.apache.hadoop.ozone.web.ozShell.Shell; -import org.apache.hadoop.ozone.web.utils.JsonUtils; import picocli.CommandLine; import picocli.CommandLine.Command; import picocli.CommandLine.Parameters; @@ -92,8 +91,8 @@ public Void call() throws Exception { boolean result = client.getObjectStore().addAcl(obj, OzoneAcl.parseAcl(acl)); - System.out.printf("%s%n", JsonUtils.toJsonStringWithDefaultPrettyPrinter( - JsonUtils.toJsonString("Acl set successfully: " + result))); + System.out.printf("%s%n", "Acl added successfully: " + result); + client.close(); return null; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/GetAclBucketHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/GetAclBucketHandler.java index 0bb967c62f55..ccb5d46fcb12 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/GetAclBucketHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/GetAclBucketHandler.java @@ -75,8 +75,8 @@ public Void call() throws Exception { List result = client.getObjectStore().getAcl(obj); - System.out.printf("%s%n", JsonUtils.toJsonStringWithDefaultPrettyPrinter( - JsonUtils.toJsonString(result))); + System.out.printf("%s%n", + JsonUtils.toJsonStringWithDefaultPrettyPrinter(result)); client.close(); return null; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/RemoveAclBucketHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/RemoveAclBucketHandler.java index 635c34bd66f5..216f66c56293 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/RemoveAclBucketHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/RemoveAclBucketHandler.java @@ -24,7 +24,6 @@ import org.apache.hadoop.ozone.web.ozShell.Handler; import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; import org.apache.hadoop.ozone.web.ozShell.Shell; -import org.apache.hadoop.ozone.web.utils.JsonUtils; import picocli.CommandLine; import picocli.CommandLine.Command; import picocli.CommandLine.Parameters; @@ -68,7 +67,7 @@ public class RemoveAclBucketHandler extends Handler { */ @Override public Void call() throws Exception { - Objects.requireNonNull(acl, "New acl to be added not specified."); + Objects.requireNonNull(acl, "ACL to be removed not specified."); OzoneAddress address = new OzoneAddress(uri); address.ensureBucketAddress(); OzoneClient client = address.createClient(createOzoneConfiguration()); @@ -92,8 +91,8 @@ public Void call() throws Exception { boolean result = client.getObjectStore().removeAcl(obj, OzoneAcl.parseAcl(acl)); - System.out.printf("%s%n", JsonUtils.toJsonStringWithDefaultPrettyPrinter( - JsonUtils.toJsonString("Acl removed successfully: " + result))); + System.out.printf("%s%n", "Acl removed successfully: " + result); + client.close(); return null; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/SetAclBucketHandler.java 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/SetAclBucketHandler.java index 2fc43f9bd025..e603068198a0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/SetAclBucketHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/SetAclBucketHandler.java @@ -24,7 +24,6 @@ import org.apache.hadoop.ozone.web.ozShell.Handler; import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; import org.apache.hadoop.ozone.web.ozShell.Shell; -import org.apache.hadoop.ozone.web.utils.JsonUtils; import picocli.CommandLine; import picocli.CommandLine.Command; import picocli.CommandLine.Parameters; @@ -92,8 +91,8 @@ public Void call() throws Exception { boolean result = client.getObjectStore().setAcl(obj, OzoneAcl.parseAcls(acls)); - System.out.printf("%s%n", JsonUtils.toJsonStringWithDefaultPrettyPrinter( - JsonUtils.toJsonString("Acl set successfully: " + result))); + System.out.printf("%s%n", "Acl set successfully: " + result); + client.close(); return null; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/AddAclKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/AddAclKeyHandler.java index 13298dceb526..b4e81345b849 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/AddAclKeyHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/AddAclKeyHandler.java @@ -24,7 +24,6 @@ import org.apache.hadoop.ozone.web.ozShell.Handler; import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; import org.apache.hadoop.ozone.web.ozShell.Shell; -import org.apache.hadoop.ozone.web.utils.JsonUtils; import picocli.CommandLine; import picocli.CommandLine.Command; import picocli.CommandLine.Parameters; @@ -95,8 +94,8 @@ public Void call() throws Exception { boolean result = client.getObjectStore().addAcl(obj, OzoneAcl.parseAcl(acl)); - System.out.printf("%s%n", JsonUtils.toJsonStringWithDefaultPrettyPrinter( - JsonUtils.toJsonString("Acl set successfully: " + result))); + System.out.printf("%s%n", "Acl added successfully: " + result); + client.close(); return null; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetAclKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetAclKeyHandler.java index edfa66aa3094..6423dbbb6e46 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetAclKeyHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetAclKeyHandler.java @@ -78,8 +78,8 @@ public Void call() throws Exception { List result = client.getObjectStore().getAcl(obj); - System.out.printf("%s%n", JsonUtils.toJsonStringWithDefaultPrettyPrinter( - JsonUtils.toJsonString(result))); + System.out.printf("%s%n", + JsonUtils.toJsonStringWithDefaultPrettyPrinter(result)); client.close(); return null; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/RemoveAclKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/RemoveAclKeyHandler.java index 135972164231..f561aa2aeb6d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/RemoveAclKeyHandler.java +++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/RemoveAclKeyHandler.java @@ -24,7 +24,6 @@ import org.apache.hadoop.ozone.web.ozShell.Handler; import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; import org.apache.hadoop.ozone.web.ozShell.Shell; -import org.apache.hadoop.ozone.web.utils.JsonUtils; import picocli.CommandLine; import picocli.CommandLine.Command; import picocli.CommandLine.Parameters; @@ -68,7 +67,7 @@ public class RemoveAclKeyHandler extends Handler { */ @Override public Void call() throws Exception { - Objects.requireNonNull(acl, "New acl to be added not specified."); + Objects.requireNonNull(acl, "ACL to be removed not specified."); OzoneAddress address = new OzoneAddress(uri); address.ensureKeyAddress(); OzoneClient client = address.createClient(createOzoneConfiguration()); @@ -95,8 +94,8 @@ public Void call() throws Exception { boolean result = client.getObjectStore().removeAcl(obj, OzoneAcl.parseAcl(acl)); - System.out.printf("%s%n", JsonUtils.toJsonStringWithDefaultPrettyPrinter( - JsonUtils.toJsonString("Acl set successfully: " + result))); + System.out.printf("%s%n", "Acl removed successfully: " + result); + client.close(); return null; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/SetAclKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/SetAclKeyHandler.java index 397330591ea3..a6a4872f9532 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/SetAclKeyHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/SetAclKeyHandler.java @@ -24,7 +24,6 @@ import org.apache.hadoop.ozone.web.ozShell.Handler; import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; import org.apache.hadoop.ozone.web.ozShell.Shell; -import org.apache.hadoop.ozone.web.utils.JsonUtils; import picocli.CommandLine; import picocli.CommandLine.Command; import picocli.CommandLine.Parameters; @@ -94,8 +93,8 @@ public Void call() throws Exception { boolean result = client.getObjectStore().setAcl(obj, OzoneAcl.parseAcls(acls)); - System.out.printf("%s%n", JsonUtils.toJsonStringWithDefaultPrettyPrinter( - JsonUtils.toJsonString("Acl set successfully: " + result))); + System.out.printf("%s%n", "Acl set successfully: " + result); + client.close(); return null; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/GetTokenHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/GetTokenHandler.java index 7626b6598f95..6d1777c7d3bb 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/GetTokenHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/GetTokenHandler.java @@ -71,7 +71,7 @@ public Void call() throws Exception { } System.out.printf("%s", JsonUtils.toJsonStringWithDefaultPrettyPrinter( - JsonUtils.toJsonString(token.encodeToUrlString()))); + token.encodeToUrlString())); return null; } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/PrintTokenHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/PrintTokenHandler.java index 93e4c2475270..24f910081127 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/PrintTokenHandler.java +++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/PrintTokenHandler.java @@ -65,7 +65,7 @@ public Void call() throws Exception { token.decodeFromUrlString(encodedToken); System.out.printf("%s", JsonUtils.toJsonStringWithDefaultPrettyPrinter( - JsonUtils.toJsonString(token.toString()))); + token.toString())); return null; } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/AddAclVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/AddAclVolumeHandler.java index acce64860dac..b9d57436287d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/AddAclVolumeHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/AddAclVolumeHandler.java @@ -24,7 +24,6 @@ import org.apache.hadoop.ozone.web.ozShell.Handler; import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; import org.apache.hadoop.ozone.web.ozShell.Shell; -import org.apache.hadoop.ozone.web.utils.JsonUtils; import picocli.CommandLine; import picocli.CommandLine.Command; import picocli.CommandLine.Parameters; @@ -89,8 +88,8 @@ public Void call() throws Exception { boolean result = client.getObjectStore().addAcl(obj, OzoneAcl.parseAcl(acl)); - System.out.printf("%s%n", JsonUtils.toJsonStringWithDefaultPrettyPrinter( - JsonUtils.toJsonString("Acl set successfully: " + result))); + System.out.printf("%s%n", "Acl added successfully: " + result); + client.close(); return null; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/GetAclVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/GetAclVolumeHandler.java index b4be3f8249d4..6c0bb207dd7d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/GetAclVolumeHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/GetAclVolumeHandler.java @@ -69,8 +69,8 @@ public Void call() throws Exception { OzoneObj.StoreType.valueOf(storeType)) .build(); List result = client.getObjectStore().getAcl(obj); - System.out.printf("%s%n", JsonUtils.toJsonStringWithDefaultPrettyPrinter( - JsonUtils.toJsonString(result))); + System.out.printf("%s%n", + JsonUtils.toJsonStringWithDefaultPrettyPrinter(result)); client.close(); return null; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/RemoveAclVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/RemoveAclVolumeHandler.java index 9b3420b3f3a6..d984f4891f53 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/RemoveAclVolumeHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/RemoveAclVolumeHandler.java @@ -24,7 +24,6 @@ import org.apache.hadoop.ozone.web.ozShell.Handler; import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; import org.apache.hadoop.ozone.web.ozShell.Shell; -import org.apache.hadoop.ozone.web.utils.JsonUtils; import picocli.CommandLine; import picocli.CommandLine.Command; import picocli.CommandLine.Parameters; @@ -68,7 +67,7 @@ public class RemoveAclVolumeHandler extends Handler { */ @Override public Void call() throws Exception { - Objects.requireNonNull(acl, "New acl to be added not specified."); + Objects.requireNonNull(acl, "ACL to be removed not 
specified."); OzoneAddress address = new OzoneAddress(uri); address.ensureVolumeAddress(); OzoneClient client = address.createClient(createOzoneConfiguration()); @@ -89,8 +88,8 @@ public Void call() throws Exception { boolean result = client.getObjectStore().removeAcl(obj, OzoneAcl.parseAcl(acl)); - System.out.printf("%s%n", JsonUtils.toJsonStringWithDefaultPrettyPrinter( - JsonUtils.toJsonString("Acl removed successfully: " + result))); + System.out.printf("%s%n", "Acl removed successfully: " + result); + client.close(); return null; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/SetAclVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/SetAclVolumeHandler.java index e3299e35946f..185f862e2942 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/SetAclVolumeHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/SetAclVolumeHandler.java @@ -24,7 +24,6 @@ import org.apache.hadoop.ozone.web.ozShell.Handler; import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; import org.apache.hadoop.ozone.web.ozShell.Shell; -import org.apache.hadoop.ozone.web.utils.JsonUtils; import picocli.CommandLine; import picocli.CommandLine.Command; import picocli.CommandLine.Parameters; @@ -92,8 +91,8 @@ public Void call() throws Exception { boolean result = client.getObjectStore().setAcl(obj, OzoneAcl.parseAcls(acls)); - System.out.printf("%s%n", JsonUtils.toJsonStringWithDefaultPrettyPrinter( - JsonUtils.toJsonString("Acl set successfully: " + result))); + System.out.printf("%s%n", "Acl set successfully: " + result); + client.close(); return null; } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyDeletingService.java index f2e992b90395..3c707ba1e18b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyDeletingService.java @@ -95,6 +95,7 @@ public void checkIfDeleteServiceisDeletingKeys() new KeyManagerImpl( new ScmBlockLocationTestingClient(null, null, 0), metaMgr, conf, UUID.randomUUID().toString(), null); + keyManager.start(conf); final int keyCount = 100; createAndDeleteKeys(keyManager, keyCount, 1); KeyDeletingService keyDeletingService = @@ -117,6 +118,7 @@ public void checkIfDeleteServiceWithFailingSCM() new KeyManagerImpl( new ScmBlockLocationTestingClient(null, null, 1), metaMgr, conf, UUID.randomUUID().toString(), null); + keyManager.start(conf); final int keyCount = 100; createAndDeleteKeys(keyManager, keyCount, 1); KeyDeletingService keyDeletingService = @@ -144,6 +146,7 @@ public void checkDeletionForEmptyKey() new KeyManagerImpl( new ScmBlockLocationTestingClient(null, null, 1), metaMgr, conf, UUID.randomUUID().toString(), null); + keyManager.start(conf); final int keyCount = 100; createAndDeleteKeys(keyManager, keyCount, 0); KeyDeletingService keyDeletingService = diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java new file mode 100644 index 000000000000..e0e4c61d3e54 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java @@ 
-0,0 +1,417 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om; +import com.google.common.base.Optional; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.StorageType; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.util.List; +import java.util.TreeSet; + +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS; + +/** + * Tests OzoneManager MetadataManager. + */ +public class TestOmMetadataManager { + + private OMMetadataManager omMetadataManager; + private OzoneConfiguration ozoneConfiguration; + + @Rule + public TemporaryFolder folder = new TemporaryFolder(); + + + @Before + public void setup() throws Exception { + ozoneConfiguration = new OzoneConfiguration(); + ozoneConfiguration.set(OZONE_OM_DB_DIRS, + folder.getRoot().getAbsolutePath()); + omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); + } + @Test + public void testListBuckets() throws Exception { + + String volumeName1 = "volumeA"; + String prefixBucketNameWithOzoneOwner = "ozoneBucket"; + String prefixBucketNameWithHadoopOwner = "hadoopBucket"; + + TestOMRequestUtils.addVolumeToDB(volumeName1, omMetadataManager); + + + TreeSet volumeABucketsPrefixWithOzoneOwner = new TreeSet<>(); + TreeSet volumeABucketsPrefixWithHadoopOwner = new TreeSet<>(); + for (int i=1; i<= 100; i++) { + if (i % 2 == 0) { + volumeABucketsPrefixWithOzoneOwner.add( + prefixBucketNameWithOzoneOwner + i); + addBucketsToCache(volumeName1, prefixBucketNameWithOzoneOwner + i); + } else { + volumeABucketsPrefixWithHadoopOwner.add( + prefixBucketNameWithHadoopOwner + i); + addBucketsToCache(volumeName1, prefixBucketNameWithHadoopOwner + i); + } + } + + String volumeName2 = "volumeB"; + TreeSet volumeBBucketsPrefixWithOzoneOwner = new TreeSet<>(); + TreeSet volumeBBucketsPrefixWithHadoopOwner = new TreeSet<>(); + TestOMRequestUtils.addVolumeToDB(volumeName2, omMetadataManager); + for (int i=1; i<= 100; i++) { + if (i % 2 == 0) { + volumeBBucketsPrefixWithOzoneOwner.add( + prefixBucketNameWithOzoneOwner + i); + addBucketsToCache(volumeName2, prefixBucketNameWithOzoneOwner + i); + } else { + volumeBBucketsPrefixWithHadoopOwner.add( + prefixBucketNameWithHadoopOwner + i); + addBucketsToCache(volumeName2, prefixBucketNameWithHadoopOwner + i); + } + } + + // List all buckets which have prefix ozoneBucket + List omBucketInfoList = + omMetadataManager.listBuckets(volumeName1, + null, prefixBucketNameWithOzoneOwner, 100); + + Assert.assertEquals(omBucketInfoList.size(), 50); + + for (OmBucketInfo omBucketInfo : omBucketInfoList) { + Assert.assertTrue(omBucketInfo.getBucketName().startsWith( + prefixBucketNameWithOzoneOwner)); + } + + + String startBucket = prefixBucketNameWithOzoneOwner + 10; + omBucketInfoList = + 
omMetadataManager.listBuckets(volumeName1, + startBucket, prefixBucketNameWithOzoneOwner, + 100); + + Assert.assertEquals(volumeABucketsPrefixWithOzoneOwner.tailSet( + startBucket).size() - 1, omBucketInfoList.size()); + + startBucket = prefixBucketNameWithOzoneOwner + 38; + omBucketInfoList = + omMetadataManager.listBuckets(volumeName1, + startBucket, prefixBucketNameWithOzoneOwner, + 100); + + Assert.assertEquals(volumeABucketsPrefixWithOzoneOwner.tailSet( + startBucket).size() - 1, omBucketInfoList.size()); + + for (OmBucketInfo omBucketInfo : omBucketInfoList) { + Assert.assertTrue(omBucketInfo.getBucketName().startsWith( + prefixBucketNameWithOzoneOwner)); + Assert.assertFalse(omBucketInfo.getBucketName().equals( + prefixBucketNameWithOzoneOwner + 10)); + } + + + + omBucketInfoList = omMetadataManager.listBuckets(volumeName2, + null, prefixBucketNameWithHadoopOwner, 100); + + Assert.assertEquals(omBucketInfoList.size(), 50); + + for (OmBucketInfo omBucketInfo : omBucketInfoList) { + Assert.assertTrue(omBucketInfo.getBucketName().startsWith( + prefixBucketNameWithHadoopOwner)); + } + + // Try to get buckets by count 10, like that get all buckets in the + // volumeB with prefixBucketNameWithHadoopOwner. + startBucket = null; + TreeSet expectedBuckets = new TreeSet<>(); + for (int i=0; i<5; i++) { + + omBucketInfoList = omMetadataManager.listBuckets(volumeName2, + startBucket, prefixBucketNameWithHadoopOwner, 10); + + Assert.assertEquals(omBucketInfoList.size(), 10); + + for (OmBucketInfo omBucketInfo : omBucketInfoList) { + expectedBuckets.add(omBucketInfo.getBucketName()); + Assert.assertTrue(omBucketInfo.getBucketName().startsWith( + prefixBucketNameWithHadoopOwner)); + startBucket = omBucketInfo.getBucketName(); + } + } + + + Assert.assertEquals(volumeBBucketsPrefixWithHadoopOwner, expectedBuckets); + // As now we have iterated all 50 buckets, calling next time should + // return empty list. + omBucketInfoList = omMetadataManager.listBuckets(volumeName2, + startBucket, prefixBucketNameWithHadoopOwner, 10); + + Assert.assertEquals(omBucketInfoList.size(), 0); + + } + + + private void addBucketsToCache(String volumeName, String bucketName) { + + OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setStorageType(StorageType.DISK) + .setIsVersionEnabled(false) + .build(); + + omMetadataManager.getBucketTable().addCacheEntry( + new CacheKey<>(omMetadataManager.getBucketKey(volumeName, bucketName)), + new CacheValue<>(Optional.of(omBucketInfo), 1)); + } + + @Test + public void testListKeys() throws Exception { + + String volumeNameA = "volumeA"; + String volumeNameB = "volumeB"; + String ozoneBucket = "ozoneBucket"; + String hadoopBucket = "hadoopBucket"; + + + // Create volumes and buckets. 
+ TestOMRequestUtils.addVolumeToDB(volumeNameA, omMetadataManager); + TestOMRequestUtils.addVolumeToDB(volumeNameB, omMetadataManager); + addBucketsToCache(volumeNameA, ozoneBucket); + addBucketsToCache(volumeNameB, hadoopBucket); + + + String prefixKeyA = "key-a"; + String prefixKeyB = "key-b"; + TreeSet keysASet = new TreeSet<>(); + TreeSet keysBSet = new TreeSet<>(); + for (int i=1; i<= 100; i++) { + if (i % 2 == 0) { + keysASet.add( + prefixKeyA + i); + addKeysToOM(volumeNameA, ozoneBucket, prefixKeyA + i, i); + } else { + keysBSet.add( + prefixKeyB + i); + addKeysToOM(volumeNameA, hadoopBucket, prefixKeyB + i, i); + } + } + + + TreeSet keysAVolumeBSet = new TreeSet<>(); + TreeSet keysBVolumeBSet = new TreeSet<>(); + for (int i=1; i<= 100; i++) { + if (i % 2 == 0) { + keysAVolumeBSet.add( + prefixKeyA + i); + addKeysToOM(volumeNameB, ozoneBucket, prefixKeyA + i, i); + } else { + keysBVolumeBSet.add( + prefixKeyB + i); + addKeysToOM(volumeNameB, hadoopBucket, prefixKeyB + i, i); + } + } + + + // List all keys which have prefix "key-a" + List omKeyInfoList = + omMetadataManager.listKeys(volumeNameA, ozoneBucket, + null, prefixKeyA, 100); + + Assert.assertEquals(omKeyInfoList.size(), 50); + + for (OmKeyInfo omKeyInfo : omKeyInfoList) { + Assert.assertTrue(omKeyInfo.getKeyName().startsWith( + prefixKeyA)); + } + + + String startKey = prefixKeyA + 10; + omKeyInfoList = + omMetadataManager.listKeys(volumeNameA, ozoneBucket, + startKey, prefixKeyA, 100); + + Assert.assertEquals(keysASet.tailSet( + startKey).size() - 1, omKeyInfoList.size()); + + startKey = prefixKeyA + 38; + omKeyInfoList = + omMetadataManager.listKeys(volumeNameA, ozoneBucket, + startKey, prefixKeyA, 100); + + Assert.assertEquals(keysASet.tailSet( + startKey).size() - 1, omKeyInfoList.size()); + + for (OmKeyInfo omKeyInfo : omKeyInfoList) { + Assert.assertTrue(omKeyInfo.getKeyName().startsWith( + prefixKeyA)); + Assert.assertFalse(omKeyInfo.getKeyName().equals( + prefixKeyA + 38)); + } + + + + omKeyInfoList = omMetadataManager.listKeys(volumeNameB, hadoopBucket, + null, prefixKeyB, 100); + + Assert.assertEquals(omKeyInfoList.size(), 50); + + for (OmKeyInfo omKeyInfo : omKeyInfoList) { + Assert.assertTrue(omKeyInfo.getKeyName().startsWith( + prefixKeyB)); + } + + // Try to get keys by count 10, like that get all keys in the + // volumeB/hadoopBucket with "key-b". + startKey = null; + TreeSet expectedKeys = new TreeSet<>(); + for (int i=0; i<5; i++) { + + omKeyInfoList = omMetadataManager.listKeys(volumeNameB, hadoopBucket, + startKey, prefixKeyB, 10); + + Assert.assertEquals(10, omKeyInfoList.size()); + + for (OmKeyInfo omKeyInfo : omKeyInfoList) { + expectedKeys.add(omKeyInfo.getKeyName()); + Assert.assertTrue(omKeyInfo.getKeyName().startsWith( + prefixKeyB)); + startKey = omKeyInfo.getKeyName(); + } + } + + Assert.assertEquals(expectedKeys, keysBVolumeBSet); + + + // As now we have iterated all 50 keys, calling next time should + // return empty list. + omKeyInfoList = omMetadataManager.listKeys(volumeNameB, hadoopBucket, + startKey, prefixKeyB, 10); + + Assert.assertEquals(omKeyInfoList.size(), 0); + + } + + @Test + public void testListKeysWithFewDeleteEntriesInCache() throws Exception { + String volumeNameA = "volumeA"; + String ozoneBucket = "ozoneBucket"; + + // Create volumes and bucket.
+ TestOMRequestUtils.addVolumeToDB(volumeNameA, omMetadataManager); + + addBucketsToCache(volumeNameA, ozoneBucket); + + String prefixKeyA = "key-a"; + TreeSet keysASet = new TreeSet<>(); + TreeSet deleteKeySet = new TreeSet<>(); + + + for (int i=1; i<= 100; i++) { + if (i % 2 == 0) { + keysASet.add( + prefixKeyA + i); + addKeysToOM(volumeNameA, ozoneBucket, prefixKeyA + i, i); + } else { + addKeysToOM(volumeNameA, ozoneBucket, prefixKeyA + i, i); + String key = omMetadataManager.getOzoneKey(volumeNameA, + ozoneBucket, prefixKeyA + i); + // Mark as deleted in cache. + omMetadataManager.getKeyTable().addCacheEntry( + new CacheKey<>(key), + new CacheValue<>(Optional.absent(), 100L)); + deleteKeySet.add(key); + } + } + + // Now list keys which match with prefixKeyA. + List omKeyInfoList = + omMetadataManager.listKeys(volumeNameA, ozoneBucket, + null, prefixKeyA, 100); + + // As in total 100, 50 are marked for delete. It should list only 50 keys. + Assert.assertEquals(50, omKeyInfoList.size()); + + TreeSet expectedKeys = new TreeSet<>(); + + for (OmKeyInfo omKeyInfo : omKeyInfoList) { + expectedKeys.add(omKeyInfo.getKeyName()); + Assert.assertTrue(omKeyInfo.getKeyName().startsWith(prefixKeyA)); + } + + Assert.assertEquals(expectedKeys, keysASet); + + + // Now get key count by 10. + String startKey = null; + expectedKeys = new TreeSet<>(); + for (int i=0; i<5; i++) { + + omKeyInfoList = omMetadataManager.listKeys(volumeNameA, ozoneBucket, + startKey, prefixKeyA, 10); + + System.out.println(i); + Assert.assertEquals(10, omKeyInfoList.size()); + + for (OmKeyInfo omKeyInfo : omKeyInfoList) { + expectedKeys.add(omKeyInfo.getKeyName()); + Assert.assertTrue(omKeyInfo.getKeyName().startsWith( + prefixKeyA)); + startKey = omKeyInfo.getKeyName(); + } + } + + Assert.assertEquals(keysASet, expectedKeys); + + + // As now we have iterated all 50 buckets, calling next time should + // return empty list. 
+ omKeyInfoList = omMetadataManager.listKeys(volumeNameA, ozoneBucket, + startKey, prefixKeyA, 10); + + Assert.assertEquals(omKeyInfoList.size(), 0); + + + + } + + private void addKeysToOM(String volumeName, String bucketName, + String keyName, int i) throws Exception { + + if (i%2== 0) { + TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName, + 1000L, HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE, omMetadataManager); + } else { + TestOMRequestUtils.addKeyToTableCache(volumeName, bucketName, keyName, + HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, + omMetadataManager); + } + } + +} \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java index 88848f8b2a8f..472d46a289e1 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java @@ -120,7 +120,52 @@ public static void addKeyToTable(boolean openKeyTable, String volumeName, OMMetadataManager omMetadataManager) throws Exception { - OmKeyInfo.Builder builder = new OmKeyInfo.Builder() + OmKeyInfo omKeyInfo = createOmKeyInfo(volumeName, bucketName, keyName, + replicationType, replicationFactor); + + if (openKeyTable) { + omMetadataManager.getOpenKeyTable().put( + omMetadataManager.getOpenKey(volumeName, bucketName, keyName, + clientID), omKeyInfo); + } else { + omMetadataManager.getKeyTable().put(omMetadataManager.getOzoneKey( + volumeName, bucketName, keyName), omKeyInfo); + } + + } + + /** + * Add key entry to key table cache. + * @param volumeName + * @param bucketName + * @param keyName + * @param replicationType + * @param replicationFactor + * @param omMetadataManager + */ + @SuppressWarnings("parameterNumber") + public static void addKeyToTableCache(String volumeName, + String bucketName, + String keyName, + HddsProtos.ReplicationType replicationType, + HddsProtos.ReplicationFactor replicationFactor, + OMMetadataManager omMetadataManager) { + + + OmKeyInfo omKeyInfo = createOmKeyInfo(volumeName, bucketName, keyName, + replicationType, replicationFactor); + + omMetadataManager.getKeyTable().addCacheEntry( + new CacheKey<>(omMetadataManager.getOzoneKey(volumeName, bucketName, + keyName)), new CacheValue<>(Optional.of(omKeyInfo), + 1L)); + + } + + private OmKeyInfo createKeyInfo(String volumeName, String bucketName, + String keyName, HddsProtos.ReplicationType replicationType, + HddsProtos.ReplicationFactor replicationFactor) { + return new OmKeyInfo.Builder() .setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(keyName) @@ -130,19 +175,10 @@ public static void addKeyToTable(boolean openKeyTable, String volumeName, .setModificationTime(Time.now()) .setDataSize(1000L) .setReplicationType(replicationType) - .setReplicationFactor(replicationFactor); - - if (openKeyTable) { - omMetadataManager.getOpenKeyTable().put( - omMetadataManager.getOpenKey(volumeName, bucketName, keyName, - clientID), builder.build()); - } else { - omMetadataManager.getKeyTable().put(omMetadataManager.getOzoneKey( - volumeName, bucketName, keyName), builder.build()); - } - + .setReplicationFactor(replicationFactor).build(); } + /** * Create OmKeyInfo. 
*/ diff --git a/hadoop-ozone/ozonefs-lib-current/pom.xml b/hadoop-ozone/ozonefs-lib-current/pom.xml index 5953acb62ef7..1645ccc82b24 100644 --- a/hadoop-ozone/ozonefs-lib-current/pom.xml +++ b/hadoop-ozone/ozonefs-lib-current/pom.xml @@ -85,6 +85,9 @@ + + ozone-default-generated.xml + diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java index 5e1a50d31c23..9ea03b545f30 100644 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java +++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java @@ -91,9 +91,11 @@ private static OzoneConfiguration createConf() { ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader(); Thread.currentThread().setContextClassLoader(null); - OzoneConfiguration conf = new OzoneConfiguration(); - Thread.currentThread().setContextClassLoader(contextClassLoader); - return conf; + try { + return new OzoneConfiguration(); + } finally { + Thread.currentThread().setContextClassLoader(contextClassLoader); + } } public BasicOzoneClientAdapterImpl(OzoneConfiguration conf, String volumeStr, @@ -109,38 +111,39 @@ public BasicOzoneClientAdapterImpl(String omHost, int omPort, ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader(); Thread.currentThread().setContextClassLoader(null); - OzoneConfiguration conf = OzoneConfiguration.of(hadoopConf); - if (omHost == null && OmUtils.isServiceIdsDefined(conf)) { - // When the host name or service id isn't given - // but ozone.om.service.ids is defined, declare failure. + try { + OzoneConfiguration conf = OzoneConfiguration.of(hadoopConf); - // This is a safety precaution that prevents the client from - // accidentally failing over to an unintended OM. - throw new IllegalArgumentException("Service ID or host name must not" - + " be omitted when ozone.om.service.ids is defined."); - } + if (omHost == null && OmUtils.isServiceIdsDefined(conf)) { + // When the host name or service id isn't given + // but ozone.om.service.ids is defined, declare failure. - if (omPort != -1) { - // When the port number is specified, perform the following check - if (OmUtils.isOmHAServiceId(conf, omHost)) { - // If omHost is a service id, it shouldn't use a port - throw new IllegalArgumentException("Port " + omPort + - " specified in URI but host '" + omHost + "' is " - + "a logical (HA) OzoneManager and does not use port information."); + // This is a safety precaution that prevents the client from + // accidentally failing over to an unintended OM. 
+        throw new IllegalArgumentException("Service ID or host name must not"
+            + " be omitted when ozone.om.service.ids is defined.");
       }
-    } else {
-      // When port number is not specified, read it from config
-      omPort = OmUtils.getOmRpcPort(conf);
-    }
-    SecurityConfig secConfig = new SecurityConfig(conf);
+      if (omPort != -1) {
+        // When the port number is specified, perform the following check
+        if (OmUtils.isOmHAServiceId(conf, omHost)) {
+          // If omHost is a service id, it shouldn't use a port
+          throw new IllegalArgumentException("Port " + omPort +
+              " specified in URI but host '" + omHost + "' is a " +
+              "logical (HA) OzoneManager and does not use port information.");
+        }
+      } else {
+        // When port number is not specified, read it from config
+        omPort = OmUtils.getOmRpcPort(conf);
+      }
 
-    if (secConfig.isSecurityEnabled()) {
-      this.securityEnabled = true;
-    }
+      SecurityConfig secConfig = new SecurityConfig(conf);
+
+      if (secConfig.isSecurityEnabled()) {
+        this.securityEnabled = true;
+      }
 
-    try {
       String replicationTypeConf =
           conf.get(OzoneConfigKeys.OZONE_REPLICATION_TYPE,
               OzoneConfigKeys.OZONE_REPLICATION_TYPE_DEFAULT);
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
index 4147c8ff4e3e..298fd2e69373 100644
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
+++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
@@ -425,7 +425,9 @@ private boolean innerDelete(Path f, boolean recursive) throws IOException {
       DeleteIterator iterator = new DeleteIterator(f, recursive);
       return iterator.iterate();
     } catch (FileNotFoundException e) {
-      LOG.debug("Couldn't delete {} - does not exist", f);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Couldn't delete {} - does not exist", f);
+      }
       return false;
     }
   }
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
index 33dee87fa623..0514bd728b74 100644
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
+++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
@@ -63,7 +63,7 @@ public DelegationTokenIssuer[] getAdditionalTokenIssuers()
     try {
       keyProvider = getKeyProvider();
     } catch (IOException ioe) {
-      LOG.error("Error retrieving KeyProvider.", ioe);
+      LOG.debug("Error retrieving KeyProvider.", ioe);
       return null;
     }
     if (keyProvider instanceof DelegationTokenIssuer) {
diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml
index 825e65ccf937..716e175a63ed 100644
--- a/hadoop-ozone/pom.xml
+++ b/hadoop-ozone/pom.xml
@@ -17,7 +17,6 @@
     org.apache.hadoop
     hadoop-main-ozone
     0.5.0-SNAPSHOT
-    ../pom.ozone.xml
 
   hadoop-ozone
   0.5.0-SNAPSHOT
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
index ecd47f247776..2d29d3f94044 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
@@ -32,9 +32,11 @@
 import java.io.InputStream;
 import java.nio.file.Path;
 import java.nio.file.Paths;
+import java.util.zip.GZIPOutputStream;
 
 import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
 import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
+import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;
 import org.apache.commons.compress.compressors.gzip.GzipCompressorInputStream;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.HddsConfigKeys;
@@ -83,6 +85,65 @@ public File getReconDbDir(Configuration conf, String dirConfigKey) {
     return getOzoneMetaDirPath(conf);
   }
 
+  /**
+   * Given a source directory, create a tar.gz file from it.
+   *
+   * @param sourcePath the path to the directory to be archived.
+   * @return tar.gz file
+   * @throws IOException
+   */
+  public static File createTarFile(Path sourcePath) throws IOException {
+    TarArchiveOutputStream tarOs = null;
+    try {
+      String sourceDir = sourcePath.toString();
+      String fileName = sourceDir.concat(".tar.gz");
+      FileOutputStream fileOutputStream = new FileOutputStream(fileName);
+      GZIPOutputStream gzipOutputStream =
+          new GZIPOutputStream(new BufferedOutputStream(fileOutputStream));
+      tarOs = new TarArchiveOutputStream(gzipOutputStream);
+      File folder = new File(sourceDir);
+      File[] filesInDir = folder.listFiles();
+      if (filesInDir != null) {
+        for (File file : filesInDir) {
+          addFilesToArchive(file.getName(), file, tarOs);
+        }
+      }
+      return new File(fileName);
+    } finally {
+      try {
+        org.apache.hadoop.io.IOUtils.closeStream(tarOs);
+      } catch (Exception e) {
+        LOG.error("Exception encountered when closing " +
+            "TAR file output stream: " + e);
+      }
+    }
+  }
+
+  private static void addFilesToArchive(String source, File file,
+      TarArchiveOutputStream tarFileOutputStream)
+      throws IOException {
+    tarFileOutputStream.putArchiveEntry(new TarArchiveEntry(file, source));
+    if (file.isFile()) {
+      FileInputStream fileInputStream = new FileInputStream(file);
+      BufferedInputStream bufferedInputStream =
+          new BufferedInputStream(fileInputStream);
+      org.apache.commons.compress.utils.IOUtils.copy(bufferedInputStream,
+          tarFileOutputStream);
+      tarFileOutputStream.closeArchiveEntry();
+      fileInputStream.close();
+    } else if (file.isDirectory()) {
+      tarFileOutputStream.closeArchiveEntry();
+      File[] filesInDir = file.listFiles();
+      if (filesInDir != null) {
+        for (File cFile : filesInDir) {
+          addFilesToArchive(cFile.getAbsolutePath(), cFile,
+              tarFileOutputStream);
+        }
+      }
+    }
+  }
+
   /**
    * Untar DB snapshot tar file to recon OM snapshot directory.
   *
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java
index 6d19dacebac8..6bb8993decb9 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.ozone.recon;
 
+import static org.apache.hadoop.ozone.recon.ReconUtils.createTarFile;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.ArgumentMatchers.any;
@@ -27,15 +28,16 @@
 import java.io.BufferedWriter;
 import java.io.File;
 import java.io.FileInputStream;
+import java.io.FileOutputStream;
 import java.io.FileWriter;
 import java.io.IOException;
 import java.io.InputStream;
 import java.nio.charset.Charset;
 import java.nio.file.Paths;
 
+import org.apache.commons.io.FileUtils;
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OmUtils;
 import org.apache.http.HttpEntity;
 import org.apache.http.StatusLine;
 import org.apache.http.client.methods.CloseableHttpResponse;
@@ -66,6 +68,44 @@ public void testGetReconDbDir() throws Exception {
     Assert.assertEquals(filePath, file.getAbsolutePath());
   }
 
+  @Test
+  public void testCreateTarFile() throws Exception {
+
+    File tempSnapshotDir = null;
+    FileInputStream fis = null;
+    FileOutputStream fos = null;
+    File tarFile = null;
+
+    try {
+      String testDirName = System.getProperty("java.io.tmpdir");
+      if (!testDirName.endsWith("/")) {
+        testDirName += "/";
+      }
+      testDirName += "TestCreateTarFile_Dir" + System.currentTimeMillis();
+      tempSnapshotDir = new File(testDirName);
+      tempSnapshotDir.mkdirs();
+
+      File file = new File(testDirName + "/temp1.txt");
+      FileWriter writer = new FileWriter(file);
+      writer.write("Test data 1");
+      writer.close();
+
+      file = new File(testDirName + "/temp2.txt");
+      writer = new FileWriter(file);
+      writer.write("Test data 2");
+      writer.close();
+
+      tarFile = createTarFile(Paths.get(testDirName));
+      Assert.assertNotNull(tarFile);
+
+    } finally {
+      org.apache.hadoop.io.IOUtils.closeStream(fis);
+      org.apache.hadoop.io.IOUtils.closeStream(fos);
+      FileUtils.deleteDirectory(tempSnapshotDir);
+      FileUtils.deleteQuietly(tarFile);
+    }
+  }
+
   @Test
   public void testUntarCheckpointFile() throws Exception {
 
@@ -87,7 +127,7 @@ public void testUntarCheckpointFile() throws Exception {
     writer.close();
 
     //Create test tar file.
-    File tarFile = OmUtils.createTarFile(newDir.toPath());
+    File tarFile = createTarFile(newDir.toPath());
     File outputDir = folder.newFolder();
     new ReconUtils().untarCheckpointFile(tarFile, outputDir.toPath());
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java
index a2eb7f45a931..63b414071d77 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java
@@ -20,6 +20,7 @@
 
 import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_DB_DIR;
 import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_DB_DIR;
+import static org.apache.hadoop.ozone.recon.ReconUtils.createTarFile;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
@@ -41,7 +42,6 @@
 import java.nio.file.Paths;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
@@ -96,7 +96,7 @@ public void testUpdateReconOmDBWithNewSnapshot() throws Exception {
     DBCheckpoint checkpoint = omMetadataManager.getStore()
         .getCheckpoint(true);
-    File tarFile = OmUtils.createTarFile(checkpoint.getCheckpointLocation());
+    File tarFile = createTarFile(checkpoint.getCheckpointLocation());
     InputStream inputStream = new FileInputStream(tarFile);
     ReconUtils reconUtilsMock = getMockReconUtils();
     when(reconUtilsMock.makeHttpCall(any(), anyString()))
@@ -147,7 +147,7 @@ public void testGetOzoneManagerDBSnapshot() throws Exception {
     writer.close();
 
     //Create test tar file.
-    File tarFile = OmUtils.createTarFile(checkpointDir.toPath());
+    File tarFile = createTarFile(checkpointDir.toPath());
     InputStream fileInputStream = new FileInputStream(tarFile);
     ReconUtils reconUtilsMock = getMockReconUtils();
     when(reconUtilsMock.makeHttpCall(any(), anyString()))
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSV4AuthParser.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSV4AuthParser.java
index 9b65b387a792..82ffa0c5c430 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSV4AuthParser.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSV4AuthParser.java
@@ -110,10 +110,14 @@ public void parse() throws Exception {
     canonicalRequest = buildCanonicalRequest();
     strToSign.append(hash(canonicalRequest));
-    LOG.debug("canonicalRequest:[{}]", canonicalRequest);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("canonicalRequest:[{}]", canonicalRequest);
+    }
 
-    headerMap.keySet().forEach(k -> LOG.trace("Header:{},value:{}", k,
-        headerMap.get(k)));
+    if (LOG.isTraceEnabled()) {
+      headerMap.keySet().forEach(k -> LOG.trace("Header:{},value:{}", k,
+          headerMap.get(k)));
+    }
 
     LOG.debug("StringToSign:[{}]", strToSign);
     stringToSign = strToSign.toString();
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java
index abaca0390824..d42c005e5831 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java
@@ -86,8 +86,9 @@ private OzoneClient getClient(OzoneConfiguration config) throws IOException {
       identifier.setSignature(v4RequestParser.getSignature());
       identifier.setAwsAccessId(v4RequestParser.getAwsAccessId());
       identifier.setOwner(new Text(v4RequestParser.getAwsAccessId()));
-
-      LOG.trace("Adding token for service:{}", omService);
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Adding token for service:{}", omService);
+      }
       Token token = new Token(identifier.getBytes(),
           identifier.getSignature().getBytes(UTF_8),
           identifier.getKind(),
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3ExceptionMapper.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3ExceptionMapper.java
index 43f335ede6f5..588dafae86a6 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3ExceptionMapper.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3ExceptionMapper.java
@@ -42,7 +42,9 @@ public class OS3ExceptionMapper implements ExceptionMapper {
 
   @Override
   public Response toResponse(OS3Exception exception) {
-    LOG.debug("Returning exception. ex: {}", exception.toString());
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Returning exception. ex: {}", exception.toString());
+    }
     exception.setRequestId(requestIdentifier.getRequestId());
     return Response.status(exception.getHttpCode())
         .entity(exception.toXml()).build();
diff --git a/pom.ozone.xml b/pom.xml
similarity index 100%
rename from pom.ozone.xml
rename to pom.xml
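
Usage sketch for the relocated tar helper (not part of the patch above): a minimal
round trip that pairs ReconUtils.createTarFile with ReconUtils#untarCheckpointFile,
using only the signatures visible in the tests in this patch. The temporary
directory and file names are placeholders, not values used anywhere in Ozone.

    import java.io.File;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Path;

    import org.apache.hadoop.ozone.recon.ReconUtils;

    public class TarRoundTripSketch {
      public static void main(String[] args) throws Exception {
        // Placeholder directory standing in for an OM DB checkpoint location.
        Path checkpointDir = Files.createTempDirectory("om-checkpoint");
        Files.write(checkpointDir.resolve("dummy.sst"),
            "test data".getBytes(StandardCharsets.UTF_8));

        // createTarFile archives the directory and returns <dir>.tar.gz.
        File tarFile = ReconUtils.createTarFile(checkpointDir);

        // Unpack it into a fresh directory, the way TestReconUtils does.
        Path outputDir = Files.createTempDirectory("om-snapshot");
        new ReconUtils().untarCheckpointFile(tarFile, outputDir);
      }
    }

Closing the TarArchiveOutputStream in the finally block of createTarFile (rather
than leaving it to the caller) keeps a failed archive attempt from leaking the
underlying FileOutputStream.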